]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.9.1-3.4.3-201206171836.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9.1-3.4.3-201206171836.patch
CommitLineData
8a60f200
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index b4a898f..830febf 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -48,9 +51,11 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37@@ -69,6 +74,7 @@ Image
38 Module.markers
39 Module.symvers
40 PENDING
41+PERF*
42 SCCS
43 System.map*
44 TAGS
45@@ -80,6 +86,7 @@ aic7*seq.h*
46 aicasm
47 aicdb.h*
48 altivec*.c
49+ashldi3.S
50 asm-offsets.h
51 asm_offsets.h
52 autoconf.h*
53@@ -92,19 +99,24 @@ bounds.h
54 bsetup
55 btfixupprep
56 build
57+builtin-policy.h
58 bvmlinux
59 bzImage*
60 capability_names.h
61 capflags.c
62 classlist.h*
63+clut_vga16.c
64+common-cmds.h
65 comp*.log
66 compile.h*
67 conf
68 config
69 config-*
70 config_data.h*
71+config.c
72 config.mak
73 config.mak.autogen
74+config.tmp
75 conmakehash
76 consolemap_deftbl.c*
77 cpustr.h
78@@ -115,9 +127,11 @@ devlist.h*
79 dnotify_test
80 docproc
81 dslm
82+dtc-lexer.lex.c
83 elf2ecoff
84 elfconfig.h*
85 evergreen_reg_safe.h
86+exception_policy.conf
87 fixdep
88 flask.h
89 fore200e_mkfirm
90@@ -125,12 +139,15 @@ fore200e_pca_fw.c*
91 gconf
92 gconf.glade.h
93 gen-devlist
94+gen-kdb_cmds.c
95 gen_crc32table
96 gen_init_cpio
97 generated
98 genheaders
99 genksyms
100 *_gray256.c
101+hash
102+hid-example
103 hpet_example
104 hugepage-mmap
105 hugepage-shm
106@@ -145,7 +162,7 @@ int32.c
107 int4.c
108 int8.c
109 kallsyms
110-kconfig
111+kern_constants.h
112 keywords.c
113 ksym.c*
114 ksym.h*
115@@ -153,7 +170,7 @@ kxgettext
116 lkc_defs.h
117 lex.c
118 lex.*.c
119-linux
120+lib1funcs.S
121 logo_*.c
122 logo_*_clut224.c
123 logo_*_mono.c
124@@ -164,14 +181,15 @@ machtypes.h
125 map
126 map_hugetlb
127 maui_boot.h
128-media
129 mconf
130+mdp
131 miboot*
132 mk_elfconfig
133 mkboot
134 mkbugboot
135 mkcpustr
136 mkdep
137+mkpiggy
138 mkprep
139 mkregtable
140 mktables
141@@ -188,6 +206,7 @@ oui.c*
142 page-types
143 parse.c
144 parse.h
145+parse-events*
146 patches*
147 pca200e.bin
148 pca200e_ecd.bin2
149@@ -197,6 +216,7 @@ perf-archive
150 piggyback
151 piggy.gzip
152 piggy.S
153+pmu-*
154 pnmtologo
155 ppc_defs.h*
156 pss_boot.h
157@@ -207,6 +227,7 @@ r300_reg_safe.h
158 r420_reg_safe.h
159 r600_reg_safe.h
160 recordmcount
161+regdb.c
162 relocs
163 rlim_names.h
164 rn50_reg_safe.h
165@@ -217,6 +238,7 @@ setup
166 setup.bin
167 setup.elf
168 sImage
169+slabinfo
170 sm_tbl*
171 split-include
172 syscalltab.h
173@@ -227,6 +249,7 @@ tftpboot.img
174 timeconst.h
175 times.h*
176 trix_boot.h
177+user_constants.h
178 utsrelease.h*
179 vdso-syms.lds
180 vdso.lds
181@@ -238,13 +261,17 @@ vdso32.lds
182 vdso32.so.dbg
183 vdso64.lds
184 vdso64.so.dbg
185+vdsox32.lds
186+vdsox32-syms.lds
187 version.h*
188 vmImage
189 vmlinux
190 vmlinux-*
191 vmlinux.aout
192 vmlinux.bin.all
193+vmlinux.bin.bz2
194 vmlinux.lds
195+vmlinux.relocs
196 vmlinuz
197 voffset.h
198 vsyscall.lds
199@@ -252,9 +279,11 @@ vsyscall_32.lds
200 wanxlfw.inc
201 uImage
202 unifdef
203+utsrelease.h
204 wakeup.bin
205 wakeup.elf
206 wakeup.lds
207 zImage*
208 zconf.hash.c
209+zconf.lex.c
210 zoffset.h
211diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
212index c1601e5..08557ce 100644
213--- a/Documentation/kernel-parameters.txt
214+++ b/Documentation/kernel-parameters.txt
215@@ -2021,6 +2021,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
216 the specified number of seconds. This is to be used if
217 your oopses keep scrolling off the screen.
218
219+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
220+ virtualization environments that don't cope well with the
221+ expand down segment used by UDEREF on X86-32 or the frequent
222+ page table updates on X86-64.
223+
224+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
225+
226 pcbit= [HW,ISDN]
227
228 pcd. [PARIDE]
229diff --git a/Makefile b/Makefile
230index a0804c6..f487027 100644
231--- a/Makefile
232+++ b/Makefile
233@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
234
235 HOSTCC = gcc
236 HOSTCXX = g++
237-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
238-HOSTCXXFLAGS = -O2
239+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
240+HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
241+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
242
243 # Decide whether to build built-in, modular, or both.
244 # Normally, just do built-in.
245@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
246 # Rules shared between *config targets and build targets
247
248 # Basic helpers built in scripts/
249-PHONY += scripts_basic
250-scripts_basic:
251+PHONY += scripts_basic gcc-plugins
252+scripts_basic: gcc-plugins
253 $(Q)$(MAKE) $(build)=scripts/basic
254 $(Q)rm -f .tmp_quiet_recordmcount
255
256@@ -564,6 +565,56 @@ else
257 KBUILD_CFLAGS += -O2
258 endif
259
260+ifndef DISABLE_PAX_PLUGINS
261+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
262+ifneq ($(PLUGINCC),)
263+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
264+ifndef CONFIG_UML
265+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
266+endif
267+endif
268+ifdef CONFIG_PAX_MEMORY_STACKLEAK
269+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
270+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
271+endif
272+ifdef CONFIG_KALLOCSTAT_PLUGIN
273+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
274+endif
275+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
276+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
277+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
278+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
279+endif
280+ifdef CONFIG_CHECKER_PLUGIN
281+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
282+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
283+endif
284+endif
285+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
286+ifdef CONFIG_PAX_SIZE_OVERFLOW
287+SIZE_OVERFLOW_PLUGIN := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
288+endif
289+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
290+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) $(SIZE_OVERFLOW_PLUGIN)
291+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
292+export PLUGINCC CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN SIZE_OVERFLOW_PLUGIN
293+ifeq ($(KBUILD_EXTMOD),)
294+gcc-plugins:
295+ $(Q)$(MAKE) $(build)=tools/gcc
296+else
297+gcc-plugins: ;
298+endif
299+else
300+gcc-plugins:
301+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
302+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
303+else
304+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
305+endif
306+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
307+endif
308+endif
309+
310 include $(srctree)/arch/$(SRCARCH)/Makefile
311
312 ifneq ($(CONFIG_FRAME_WARN),0)
313@@ -708,7 +759,7 @@ export mod_strip_cmd
314
315
316 ifeq ($(KBUILD_EXTMOD),)
317-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
318+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
319
320 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
321 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
322@@ -932,6 +983,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
323
324 # The actual objects are generated when descending,
325 # make sure no implicit rule kicks in
326+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
327+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
328 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
329
330 # Handle descending into subdirectories listed in $(vmlinux-dirs)
331@@ -941,7 +994,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
332 # Error messages still appears in the original language
333
334 PHONY += $(vmlinux-dirs)
335-$(vmlinux-dirs): prepare scripts
336+$(vmlinux-dirs): gcc-plugins prepare scripts
337 $(Q)$(MAKE) $(build)=$@
338
339 # Store (new) KERNELRELASE string in include/config/kernel.release
340@@ -985,6 +1038,7 @@ prepare0: archprepare FORCE
341 $(Q)$(MAKE) $(build)=.
342
343 # All the preparing..
344+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
345 prepare: prepare0
346
347 # Generate some files
348@@ -1092,6 +1146,8 @@ all: modules
349 # using awk while concatenating to the final file.
350
351 PHONY += modules
352+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
353+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
354 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
355 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
356 @$(kecho) ' Building modules, stage 2.';
357@@ -1107,7 +1163,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
358
359 # Target to prepare building external modules
360 PHONY += modules_prepare
361-modules_prepare: prepare scripts
362+modules_prepare: gcc-plugins prepare scripts
363
364 # Target to install modules
365 PHONY += modules_install
366@@ -1204,6 +1260,7 @@ distclean: mrproper
367 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
368 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
369 -o -name '.*.rej' \
370+ -o -name '.*.rej' -o -name '*.so' \
371 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
372 -type f -print | xargs rm -f
373
374@@ -1364,6 +1421,8 @@ PHONY += $(module-dirs) modules
375 $(module-dirs): crmodverdir $(objtree)/Module.symvers
376 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
377
378+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
379+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
380 modules: $(module-dirs)
381 @$(kecho) ' Building modules, stage 2.';
382 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
383@@ -1490,17 +1549,21 @@ else
384 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
385 endif
386
387-%.s: %.c prepare scripts FORCE
388+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
389+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
390+%.s: %.c gcc-plugins prepare scripts FORCE
391 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
392 %.i: %.c prepare scripts FORCE
393 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
394-%.o: %.c prepare scripts FORCE
395+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
396+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
397+%.o: %.c gcc-plugins prepare scripts FORCE
398 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
399 %.lst: %.c prepare scripts FORCE
400 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
401-%.s: %.S prepare scripts FORCE
402+%.s: %.S gcc-plugins prepare scripts FORCE
403 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
404-%.o: %.S prepare scripts FORCE
405+%.o: %.S gcc-plugins prepare scripts FORCE
406 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
407 %.symtypes: %.c prepare scripts FORCE
408 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
409@@ -1510,11 +1573,15 @@ endif
410 $(cmd_crmodverdir)
411 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
412 $(build)=$(build-dir)
413-%/: prepare scripts FORCE
414+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
415+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
416+%/: gcc-plugins prepare scripts FORCE
417 $(cmd_crmodverdir)
418 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
419 $(build)=$(build-dir)
420-%.ko: prepare scripts FORCE
421+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
422+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
423+%.ko: gcc-plugins prepare scripts FORCE
424 $(cmd_crmodverdir)
425 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
426 $(build)=$(build-dir) $(@:.ko=.o)
427diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
428index 3bb7ffe..347a54c 100644
429--- a/arch/alpha/include/asm/atomic.h
430+++ b/arch/alpha/include/asm/atomic.h
431@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
432 #define atomic_dec(v) atomic_sub(1,(v))
433 #define atomic64_dec(v) atomic64_sub(1,(v))
434
435+#define atomic64_read_unchecked(v) atomic64_read(v)
436+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
437+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
438+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
439+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
440+#define atomic64_inc_unchecked(v) atomic64_inc(v)
441+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
442+#define atomic64_dec_unchecked(v) atomic64_dec(v)
443+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
444+
445 #define smp_mb__before_atomic_dec() smp_mb()
446 #define smp_mb__after_atomic_dec() smp_mb()
447 #define smp_mb__before_atomic_inc() smp_mb()
448diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
449index ad368a9..fbe0f25 100644
450--- a/arch/alpha/include/asm/cache.h
451+++ b/arch/alpha/include/asm/cache.h
452@@ -4,19 +4,19 @@
453 #ifndef __ARCH_ALPHA_CACHE_H
454 #define __ARCH_ALPHA_CACHE_H
455
456+#include <linux/const.h>
457
458 /* Bytes per L1 (data) cache line. */
459 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
460-# define L1_CACHE_BYTES 64
461 # define L1_CACHE_SHIFT 6
462 #else
463 /* Both EV4 and EV5 are write-through, read-allocate,
464 direct-mapped, physical.
465 */
466-# define L1_CACHE_BYTES 32
467 # define L1_CACHE_SHIFT 5
468 #endif
469
470+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
471 #define SMP_CACHE_BYTES L1_CACHE_BYTES
472
473 #endif
474diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
475index 968d999..d36b2df 100644
476--- a/arch/alpha/include/asm/elf.h
477+++ b/arch/alpha/include/asm/elf.h
478@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
479
480 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
481
482+#ifdef CONFIG_PAX_ASLR
483+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
484+
485+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
486+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
487+#endif
488+
489 /* $0 is set by ld.so to a pointer to a function which might be
490 registered using atexit. This provides a mean for the dynamic
491 linker to call DT_FINI functions for shared libraries that have
492diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
493index bc2a0da..8ad11ee 100644
494--- a/arch/alpha/include/asm/pgalloc.h
495+++ b/arch/alpha/include/asm/pgalloc.h
496@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
497 pgd_set(pgd, pmd);
498 }
499
500+static inline void
501+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
502+{
503+ pgd_populate(mm, pgd, pmd);
504+}
505+
506 extern pgd_t *pgd_alloc(struct mm_struct *mm);
507
508 static inline void
509diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
510index 81a4342..348b927 100644
511--- a/arch/alpha/include/asm/pgtable.h
512+++ b/arch/alpha/include/asm/pgtable.h
513@@ -102,6 +102,17 @@ struct vm_area_struct;
514 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
515 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
516 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
517+
518+#ifdef CONFIG_PAX_PAGEEXEC
519+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
520+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
521+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
522+#else
523+# define PAGE_SHARED_NOEXEC PAGE_SHARED
524+# define PAGE_COPY_NOEXEC PAGE_COPY
525+# define PAGE_READONLY_NOEXEC PAGE_READONLY
526+#endif
527+
528 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
529
530 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
531diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
532index 2fd00b7..cfd5069 100644
533--- a/arch/alpha/kernel/module.c
534+++ b/arch/alpha/kernel/module.c
535@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
536
537 /* The small sections were sorted to the end of the segment.
538 The following should definitely cover them. */
539- gp = (u64)me->module_core + me->core_size - 0x8000;
540+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
541 got = sechdrs[me->arch.gotsecindex].sh_addr;
542
543 for (i = 0; i < n; i++) {
544diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
545index 49ee319..9ee7d14 100644
546--- a/arch/alpha/kernel/osf_sys.c
547+++ b/arch/alpha/kernel/osf_sys.c
548@@ -1146,7 +1146,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
549 /* At this point: (!vma || addr < vma->vm_end). */
550 if (limit - len < addr)
551 return -ENOMEM;
552- if (!vma || addr + len <= vma->vm_start)
553+ if (check_heap_stack_gap(vma, addr, len))
554 return addr;
555 addr = vma->vm_end;
556 vma = vma->vm_next;
557@@ -1182,6 +1182,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
558 merely specific addresses, but regions of memory -- perhaps
559 this feature should be incorporated into all ports? */
560
561+#ifdef CONFIG_PAX_RANDMMAP
562+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
563+#endif
564+
565 if (addr) {
566 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
567 if (addr != (unsigned long) -ENOMEM)
568@@ -1189,8 +1193,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
569 }
570
571 /* Next, try allocating at TASK_UNMAPPED_BASE. */
572- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
573- len, limit);
574+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
575+
576 if (addr != (unsigned long) -ENOMEM)
577 return addr;
578
579diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
580index 5eecab1..609abc0 100644
581--- a/arch/alpha/mm/fault.c
582+++ b/arch/alpha/mm/fault.c
583@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
584 __reload_thread(pcb);
585 }
586
587+#ifdef CONFIG_PAX_PAGEEXEC
588+/*
589+ * PaX: decide what to do with offenders (regs->pc = fault address)
590+ *
591+ * returns 1 when task should be killed
592+ * 2 when patched PLT trampoline was detected
593+ * 3 when unpatched PLT trampoline was detected
594+ */
595+static int pax_handle_fetch_fault(struct pt_regs *regs)
596+{
597+
598+#ifdef CONFIG_PAX_EMUPLT
599+ int err;
600+
601+ do { /* PaX: patched PLT emulation #1 */
602+ unsigned int ldah, ldq, jmp;
603+
604+ err = get_user(ldah, (unsigned int *)regs->pc);
605+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
606+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
607+
608+ if (err)
609+ break;
610+
611+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
612+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
613+ jmp == 0x6BFB0000U)
614+ {
615+ unsigned long r27, addr;
616+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
617+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
618+
619+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
620+ err = get_user(r27, (unsigned long *)addr);
621+ if (err)
622+ break;
623+
624+ regs->r27 = r27;
625+ regs->pc = r27;
626+ return 2;
627+ }
628+ } while (0);
629+
630+ do { /* PaX: patched PLT emulation #2 */
631+ unsigned int ldah, lda, br;
632+
633+ err = get_user(ldah, (unsigned int *)regs->pc);
634+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
635+ err |= get_user(br, (unsigned int *)(regs->pc+8));
636+
637+ if (err)
638+ break;
639+
640+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
641+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
642+ (br & 0xFFE00000U) == 0xC3E00000U)
643+ {
644+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
645+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
646+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
647+
648+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
649+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
650+ return 2;
651+ }
652+ } while (0);
653+
654+ do { /* PaX: unpatched PLT emulation */
655+ unsigned int br;
656+
657+ err = get_user(br, (unsigned int *)regs->pc);
658+
659+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
660+ unsigned int br2, ldq, nop, jmp;
661+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
662+
663+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
664+ err = get_user(br2, (unsigned int *)addr);
665+ err |= get_user(ldq, (unsigned int *)(addr+4));
666+ err |= get_user(nop, (unsigned int *)(addr+8));
667+ err |= get_user(jmp, (unsigned int *)(addr+12));
668+ err |= get_user(resolver, (unsigned long *)(addr+16));
669+
670+ if (err)
671+ break;
672+
673+ if (br2 == 0xC3600000U &&
674+ ldq == 0xA77B000CU &&
675+ nop == 0x47FF041FU &&
676+ jmp == 0x6B7B0000U)
677+ {
678+ regs->r28 = regs->pc+4;
679+ regs->r27 = addr+16;
680+ regs->pc = resolver;
681+ return 3;
682+ }
683+ }
684+ } while (0);
685+#endif
686+
687+ return 1;
688+}
689+
690+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
691+{
692+ unsigned long i;
693+
694+ printk(KERN_ERR "PAX: bytes at PC: ");
695+ for (i = 0; i < 5; i++) {
696+ unsigned int c;
697+ if (get_user(c, (unsigned int *)pc+i))
698+ printk(KERN_CONT "???????? ");
699+ else
700+ printk(KERN_CONT "%08x ", c);
701+ }
702+ printk("\n");
703+}
704+#endif
705
706 /*
707 * This routine handles page faults. It determines the address,
708@@ -130,8 +248,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
709 good_area:
710 si_code = SEGV_ACCERR;
711 if (cause < 0) {
712- if (!(vma->vm_flags & VM_EXEC))
713+ if (!(vma->vm_flags & VM_EXEC)) {
714+
715+#ifdef CONFIG_PAX_PAGEEXEC
716+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
717+ goto bad_area;
718+
719+ up_read(&mm->mmap_sem);
720+ switch (pax_handle_fetch_fault(regs)) {
721+
722+#ifdef CONFIG_PAX_EMUPLT
723+ case 2:
724+ case 3:
725+ return;
726+#endif
727+
728+ }
729+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
730+ do_group_exit(SIGKILL);
731+#else
732 goto bad_area;
733+#endif
734+
735+ }
736 } else if (!cause) {
737 /* Allow reads even for write-only mappings */
738 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
739diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
740index 68374ba..cff7196 100644
741--- a/arch/arm/include/asm/atomic.h
742+++ b/arch/arm/include/asm/atomic.h
743@@ -17,17 +17,35 @@
744 #include <asm/barrier.h>
745 #include <asm/cmpxchg.h>
746
747+#ifdef CONFIG_GENERIC_ATOMIC64
748+#include <asm-generic/atomic64.h>
749+#endif
750+
751 #define ATOMIC_INIT(i) { (i) }
752
753 #ifdef __KERNEL__
754
755+#define _ASM_EXTABLE(from, to) \
756+" .pushsection __ex_table,\"a\"\n"\
757+" .align 3\n" \
758+" .long " #from ", " #to"\n" \
759+" .popsection"
760+
761 /*
762 * On ARM, ordinary assignment (str instruction) doesn't clear the local
763 * strex/ldrex monitor on some implementations. The reason we can use it for
764 * atomic_set() is the clrex or dummy strex done on every exception return.
765 */
766 #define atomic_read(v) (*(volatile int *)&(v)->counter)
767+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
768+{
769+ return v->counter;
770+}
771 #define atomic_set(v,i) (((v)->counter) = (i))
772+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
773+{
774+ v->counter = i;
775+}
776
777 #if __LINUX_ARM_ARCH__ >= 6
778
779@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
780 int result;
781
782 __asm__ __volatile__("@ atomic_add\n"
783+"1: ldrex %1, [%3]\n"
784+" adds %0, %1, %4\n"
785+
786+#ifdef CONFIG_PAX_REFCOUNT
787+" bvc 3f\n"
788+"2: bkpt 0xf103\n"
789+"3:\n"
790+#endif
791+
792+" strex %1, %0, [%3]\n"
793+" teq %1, #0\n"
794+" bne 1b"
795+
796+#ifdef CONFIG_PAX_REFCOUNT
797+"\n4:\n"
798+ _ASM_EXTABLE(2b, 4b)
799+#endif
800+
801+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
802+ : "r" (&v->counter), "Ir" (i)
803+ : "cc");
804+}
805+
806+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
807+{
808+ unsigned long tmp;
809+ int result;
810+
811+ __asm__ __volatile__("@ atomic_add_unchecked\n"
812 "1: ldrex %0, [%3]\n"
813 " add %0, %0, %4\n"
814 " strex %1, %0, [%3]\n"
815@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
816 smp_mb();
817
818 __asm__ __volatile__("@ atomic_add_return\n"
819+"1: ldrex %1, [%3]\n"
820+" adds %0, %1, %4\n"
821+
822+#ifdef CONFIG_PAX_REFCOUNT
823+" bvc 3f\n"
824+" mov %0, %1\n"
825+"2: bkpt 0xf103\n"
826+"3:\n"
827+#endif
828+
829+" strex %1, %0, [%3]\n"
830+" teq %1, #0\n"
831+" bne 1b"
832+
833+#ifdef CONFIG_PAX_REFCOUNT
834+"\n4:\n"
835+ _ASM_EXTABLE(2b, 4b)
836+#endif
837+
838+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
839+ : "r" (&v->counter), "Ir" (i)
840+ : "cc");
841+
842+ smp_mb();
843+
844+ return result;
845+}
846+
847+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
848+{
849+ unsigned long tmp;
850+ int result;
851+
852+ smp_mb();
853+
854+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
855 "1: ldrex %0, [%3]\n"
856 " add %0, %0, %4\n"
857 " strex %1, %0, [%3]\n"
858@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
859 int result;
860
861 __asm__ __volatile__("@ atomic_sub\n"
862+"1: ldrex %1, [%3]\n"
863+" subs %0, %1, %4\n"
864+
865+#ifdef CONFIG_PAX_REFCOUNT
866+" bvc 3f\n"
867+"2: bkpt 0xf103\n"
868+"3:\n"
869+#endif
870+
871+" strex %1, %0, [%3]\n"
872+" teq %1, #0\n"
873+" bne 1b"
874+
875+#ifdef CONFIG_PAX_REFCOUNT
876+"\n4:\n"
877+ _ASM_EXTABLE(2b, 4b)
878+#endif
879+
880+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
881+ : "r" (&v->counter), "Ir" (i)
882+ : "cc");
883+}
884+
885+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
886+{
887+ unsigned long tmp;
888+ int result;
889+
890+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
891 "1: ldrex %0, [%3]\n"
892 " sub %0, %0, %4\n"
893 " strex %1, %0, [%3]\n"
894@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
895 smp_mb();
896
897 __asm__ __volatile__("@ atomic_sub_return\n"
898-"1: ldrex %0, [%3]\n"
899-" sub %0, %0, %4\n"
900+"1: ldrex %1, [%3]\n"
901+" sub %0, %1, %4\n"
902+
903+#ifdef CONFIG_PAX_REFCOUNT
904+" bvc 3f\n"
905+" mov %0, %1\n"
906+"2: bkpt 0xf103\n"
907+"3:\n"
908+#endif
909+
910 " strex %1, %0, [%3]\n"
911 " teq %1, #0\n"
912 " bne 1b"
913+
914+#ifdef CONFIG_PAX_REFCOUNT
915+"\n4:\n"
916+ _ASM_EXTABLE(2b, 4b)
917+#endif
918+
919 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
920 : "r" (&v->counter), "Ir" (i)
921 : "cc");
922@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
923 return oldval;
924 }
925
926+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
927+{
928+ unsigned long oldval, res;
929+
930+ smp_mb();
931+
932+ do {
933+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
934+ "ldrex %1, [%3]\n"
935+ "mov %0, #0\n"
936+ "teq %1, %4\n"
937+ "strexeq %0, %5, [%3]\n"
938+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
939+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
940+ : "cc");
941+ } while (res);
942+
943+ smp_mb();
944+
945+ return oldval;
946+}
947+
948 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
949 {
950 unsigned long tmp, tmp2;
951@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
952
953 return val;
954 }
955+
956+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
957+{
958+ return atomic_add_return(i, v);
959+}
960+
961 #define atomic_add(i, v) (void) atomic_add_return(i, v)
962+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
963+{
964+ (void) atomic_add_return(i, v);
965+}
966
967 static inline int atomic_sub_return(int i, atomic_t *v)
968 {
969@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
970 return val;
971 }
972 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
973+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
974+{
975+ (void) atomic_sub_return(i, v);
976+}
977
978 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
979 {
980@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
981 return ret;
982 }
983
984+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
985+{
986+ return atomic_cmpxchg(v, old, new);
987+}
988+
989 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
990 {
991 unsigned long flags;
992@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
993 #endif /* __LINUX_ARM_ARCH__ */
994
995 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
996+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
997+{
998+ return xchg(&v->counter, new);
999+}
1000
1001 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1002 {
1003@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1004 }
1005
1006 #define atomic_inc(v) atomic_add(1, v)
1007+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1008+{
1009+ atomic_add_unchecked(1, v);
1010+}
1011 #define atomic_dec(v) atomic_sub(1, v)
1012+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1013+{
1014+ atomic_sub_unchecked(1, v);
1015+}
1016
1017 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1018+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1019+{
1020+ return atomic_add_return_unchecked(1, v) == 0;
1021+}
1022 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1023 #define atomic_inc_return(v) (atomic_add_return(1, v))
1024+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1025+{
1026+ return atomic_add_return_unchecked(1, v);
1027+}
1028 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1029 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1030
1031@@ -241,6 +428,14 @@ typedef struct {
1032 u64 __aligned(8) counter;
1033 } atomic64_t;
1034
1035+#ifdef CONFIG_PAX_REFCOUNT
1036+typedef struct {
1037+ u64 __aligned(8) counter;
1038+} atomic64_unchecked_t;
1039+#else
1040+typedef atomic64_t atomic64_unchecked_t;
1041+#endif
1042+
1043 #define ATOMIC64_INIT(i) { (i) }
1044
1045 static inline u64 atomic64_read(atomic64_t *v)
1046@@ -256,6 +451,19 @@ static inline u64 atomic64_read(atomic64_t *v)
1047 return result;
1048 }
1049
1050+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1051+{
1052+ u64 result;
1053+
1054+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1055+" ldrexd %0, %H0, [%1]"
1056+ : "=&r" (result)
1057+ : "r" (&v->counter), "Qo" (v->counter)
1058+ );
1059+
1060+ return result;
1061+}
1062+
1063 static inline void atomic64_set(atomic64_t *v, u64 i)
1064 {
1065 u64 tmp;
1066@@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1067 : "cc");
1068 }
1069
1070+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1071+{
1072+ u64 tmp;
1073+
1074+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1075+"1: ldrexd %0, %H0, [%2]\n"
1076+" strexd %0, %3, %H3, [%2]\n"
1077+" teq %0, #0\n"
1078+" bne 1b"
1079+ : "=&r" (tmp), "=Qo" (v->counter)
1080+ : "r" (&v->counter), "r" (i)
1081+ : "cc");
1082+}
1083+
1084 static inline void atomic64_add(u64 i, atomic64_t *v)
1085 {
1086 u64 result;
1087@@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1088 __asm__ __volatile__("@ atomic64_add\n"
1089 "1: ldrexd %0, %H0, [%3]\n"
1090 " adds %0, %0, %4\n"
1091+" adcs %H0, %H0, %H4\n"
1092+
1093+#ifdef CONFIG_PAX_REFCOUNT
1094+" bvc 3f\n"
1095+"2: bkpt 0xf103\n"
1096+"3:\n"
1097+#endif
1098+
1099+" strexd %1, %0, %H0, [%3]\n"
1100+" teq %1, #0\n"
1101+" bne 1b"
1102+
1103+#ifdef CONFIG_PAX_REFCOUNT
1104+"\n4:\n"
1105+ _ASM_EXTABLE(2b, 4b)
1106+#endif
1107+
1108+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1109+ : "r" (&v->counter), "r" (i)
1110+ : "cc");
1111+}
1112+
1113+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1114+{
1115+ u64 result;
1116+ unsigned long tmp;
1117+
1118+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1119+"1: ldrexd %0, %H0, [%3]\n"
1120+" adds %0, %0, %4\n"
1121 " adc %H0, %H0, %H4\n"
1122 " strexd %1, %0, %H0, [%3]\n"
1123 " teq %1, #0\n"
1124@@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1125
1126 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1127 {
1128- u64 result;
1129- unsigned long tmp;
1130+ u64 result, tmp;
1131
1132 smp_mb();
1133
1134 __asm__ __volatile__("@ atomic64_add_return\n"
1135+"1: ldrexd %1, %H1, [%3]\n"
1136+" adds %0, %1, %4\n"
1137+" adcs %H0, %H1, %H4\n"
1138+
1139+#ifdef CONFIG_PAX_REFCOUNT
1140+" bvc 3f\n"
1141+" mov %0, %1\n"
1142+" mov %H0, %H1\n"
1143+"2: bkpt 0xf103\n"
1144+"3:\n"
1145+#endif
1146+
1147+" strexd %1, %0, %H0, [%3]\n"
1148+" teq %1, #0\n"
1149+" bne 1b"
1150+
1151+#ifdef CONFIG_PAX_REFCOUNT
1152+"\n4:\n"
1153+ _ASM_EXTABLE(2b, 4b)
1154+#endif
1155+
1156+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1157+ : "r" (&v->counter), "r" (i)
1158+ : "cc");
1159+
1160+ smp_mb();
1161+
1162+ return result;
1163+}
1164+
1165+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1166+{
1167+ u64 result;
1168+ unsigned long tmp;
1169+
1170+ smp_mb();
1171+
1172+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1173 "1: ldrexd %0, %H0, [%3]\n"
1174 " adds %0, %0, %4\n"
1175 " adc %H0, %H0, %H4\n"
1176@@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1177 __asm__ __volatile__("@ atomic64_sub\n"
1178 "1: ldrexd %0, %H0, [%3]\n"
1179 " subs %0, %0, %4\n"
1180+" sbcs %H0, %H0, %H4\n"
1181+
1182+#ifdef CONFIG_PAX_REFCOUNT
1183+" bvc 3f\n"
1184+"2: bkpt 0xf103\n"
1185+"3:\n"
1186+#endif
1187+
1188+" strexd %1, %0, %H0, [%3]\n"
1189+" teq %1, #0\n"
1190+" bne 1b"
1191+
1192+#ifdef CONFIG_PAX_REFCOUNT
1193+"\n4:\n"
1194+ _ASM_EXTABLE(2b, 4b)
1195+#endif
1196+
1197+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1198+ : "r" (&v->counter), "r" (i)
1199+ : "cc");
1200+}
1201+
1202+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1203+{
1204+ u64 result;
1205+ unsigned long tmp;
1206+
1207+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1208+"1: ldrexd %0, %H0, [%3]\n"
1209+" subs %0, %0, %4\n"
1210 " sbc %H0, %H0, %H4\n"
1211 " strexd %1, %0, %H0, [%3]\n"
1212 " teq %1, #0\n"
1213@@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1214
1215 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1216 {
1217- u64 result;
1218- unsigned long tmp;
1219+ u64 result, tmp;
1220
1221 smp_mb();
1222
1223 __asm__ __volatile__("@ atomic64_sub_return\n"
1224-"1: ldrexd %0, %H0, [%3]\n"
1225-" subs %0, %0, %4\n"
1226-" sbc %H0, %H0, %H4\n"
1227+"1: ldrexd %1, %H1, [%3]\n"
1228+" subs %0, %1, %4\n"
1229+" sbc %H0, %H1, %H4\n"
1230+
1231+#ifdef CONFIG_PAX_REFCOUNT
1232+" bvc 3f\n"
1233+" mov %0, %1\n"
1234+" mov %H0, %H1\n"
1235+"2: bkpt 0xf103\n"
1236+"3:\n"
1237+#endif
1238+
1239 " strexd %1, %0, %H0, [%3]\n"
1240 " teq %1, #0\n"
1241 " bne 1b"
1242+
1243+#ifdef CONFIG_PAX_REFCOUNT
1244+"\n4:\n"
1245+ _ASM_EXTABLE(2b, 4b)
1246+#endif
1247+
1248 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1249 : "r" (&v->counter), "r" (i)
1250 : "cc");
1251@@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1252 return oldval;
1253 }
1254
1255+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1256+{
1257+ u64 oldval;
1258+ unsigned long res;
1259+
1260+ smp_mb();
1261+
1262+ do {
1263+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1264+ "ldrexd %1, %H1, [%3]\n"
1265+ "mov %0, #0\n"
1266+ "teq %1, %4\n"
1267+ "teqeq %H1, %H4\n"
1268+ "strexdeq %0, %5, %H5, [%3]"
1269+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1270+ : "r" (&ptr->counter), "r" (old), "r" (new)
1271+ : "cc");
1272+ } while (res);
1273+
1274+ smp_mb();
1275+
1276+ return oldval;
1277+}
1278+
1279 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1280 {
1281 u64 result;
1282@@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1283
1284 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1285 {
1286- u64 result;
1287- unsigned long tmp;
1288+ u64 result, tmp;
1289
1290 smp_mb();
1291
1292 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1293-"1: ldrexd %0, %H0, [%3]\n"
1294-" subs %0, %0, #1\n"
1295-" sbc %H0, %H0, #0\n"
1296+"1: ldrexd %1, %H1, [%3]\n"
1297+" subs %0, %1, #1\n"
1298+" sbc %H0, %H1, #0\n"
1299+
1300+#ifdef CONFIG_PAX_REFCOUNT
1301+" bvc 3f\n"
1302+" mov %0, %1\n"
1303+" mov %H0, %H1\n"
1304+"2: bkpt 0xf103\n"
1305+"3:\n"
1306+#endif
1307+
1308 " teq %H0, #0\n"
1309-" bmi 2f\n"
1310+" bmi 4f\n"
1311 " strexd %1, %0, %H0, [%3]\n"
1312 " teq %1, #0\n"
1313 " bne 1b\n"
1314-"2:"
1315+"4:\n"
1316+
1317+#ifdef CONFIG_PAX_REFCOUNT
1318+ _ASM_EXTABLE(2b, 4b)
1319+#endif
1320+
1321 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1322 : "r" (&v->counter)
1323 : "cc");
1324@@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1325 " teq %0, %5\n"
1326 " teqeq %H0, %H5\n"
1327 " moveq %1, #0\n"
1328-" beq 2f\n"
1329+" beq 4f\n"
1330 " adds %0, %0, %6\n"
1331 " adc %H0, %H0, %H6\n"
1332+
1333+#ifdef CONFIG_PAX_REFCOUNT
1334+" bvc 3f\n"
1335+"2: bkpt 0xf103\n"
1336+"3:\n"
1337+#endif
1338+
1339 " strexd %2, %0, %H0, [%4]\n"
1340 " teq %2, #0\n"
1341 " bne 1b\n"
1342-"2:"
1343+"4:\n"
1344+
1345+#ifdef CONFIG_PAX_REFCOUNT
1346+ _ASM_EXTABLE(2b, 4b)
1347+#endif
1348+
1349 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1350 : "r" (&v->counter), "r" (u), "r" (a)
1351 : "cc");
1352@@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1353
1354 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1355 #define atomic64_inc(v) atomic64_add(1LL, (v))
1356+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1357 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1358+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1359 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1360 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1361 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1362+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1363 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1364 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1365 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1366diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1367index 75fe66b..2255c86 100644
1368--- a/arch/arm/include/asm/cache.h
1369+++ b/arch/arm/include/asm/cache.h
1370@@ -4,8 +4,10 @@
1371 #ifndef __ASMARM_CACHE_H
1372 #define __ASMARM_CACHE_H
1373
1374+#include <linux/const.h>
1375+
1376 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1377-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1378+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1379
1380 /*
1381 * Memory returned by kmalloc() may be used for DMA, so we must make
1382diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1383index 1252a26..9dc17b5 100644
1384--- a/arch/arm/include/asm/cacheflush.h
1385+++ b/arch/arm/include/asm/cacheflush.h
1386@@ -108,7 +108,7 @@ struct cpu_cache_fns {
1387 void (*dma_unmap_area)(const void *, size_t, int);
1388
1389 void (*dma_flush_range)(const void *, const void *);
1390-};
1391+} __no_const;
1392
1393 /*
1394 * Select the calling method
1395diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1396index d41d7cb..9bea5e0 100644
1397--- a/arch/arm/include/asm/cmpxchg.h
1398+++ b/arch/arm/include/asm/cmpxchg.h
1399@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1400
1401 #define xchg(ptr,x) \
1402 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1403+#define xchg_unchecked(ptr,x) \
1404+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1405
1406 #include <asm-generic/cmpxchg-local.h>
1407
1408diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1409index 38050b1..9d90e8b 100644
1410--- a/arch/arm/include/asm/elf.h
1411+++ b/arch/arm/include/asm/elf.h
1412@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1413 the loader. We need to make sure that it is out of the way of the program
1414 that it will "exec", and that there is sufficient room for the brk. */
1415
1416-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1417+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1418+
1419+#ifdef CONFIG_PAX_ASLR
1420+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1421+
1422+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1423+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1424+#endif
1425
1426 /* When the program starts, a1 contains a pointer to a function to be
1427 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1428@@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1429 extern void elf_set_personality(const struct elf32_hdr *);
1430 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1431
1432-struct mm_struct;
1433-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1434-#define arch_randomize_brk arch_randomize_brk
1435-
1436 #endif
1437diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1438index e51b1e8..32a3113 100644
1439--- a/arch/arm/include/asm/kmap_types.h
1440+++ b/arch/arm/include/asm/kmap_types.h
1441@@ -21,6 +21,7 @@ enum km_type {
1442 KM_L1_CACHE,
1443 KM_L2_CACHE,
1444 KM_KDB,
1445+ KM_CLEARPAGE,
1446 KM_TYPE_NR
1447 };
1448
1449diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1450index 53426c6..c7baff3 100644
1451--- a/arch/arm/include/asm/outercache.h
1452+++ b/arch/arm/include/asm/outercache.h
1453@@ -35,7 +35,7 @@ struct outer_cache_fns {
1454 #endif
1455 void (*set_debug)(unsigned long);
1456 void (*resume)(void);
1457-};
1458+} __no_const;
1459
1460 #ifdef CONFIG_OUTER_CACHE
1461
1462diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1463index 5838361..da6e813 100644
1464--- a/arch/arm/include/asm/page.h
1465+++ b/arch/arm/include/asm/page.h
1466@@ -123,7 +123,7 @@ struct cpu_user_fns {
1467 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1468 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1469 unsigned long vaddr, struct vm_area_struct *vma);
1470-};
1471+} __no_const;
1472
1473 #ifdef MULTI_USER
1474 extern struct cpu_user_fns cpu_user;
1475diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1476index 943504f..bf8d667 100644
1477--- a/arch/arm/include/asm/pgalloc.h
1478+++ b/arch/arm/include/asm/pgalloc.h
1479@@ -43,6 +43,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1480 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1481 }
1482
1483+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1484+{
1485+ pud_populate(mm, pud, pmd);
1486+}
1487+
1488 #else /* !CONFIG_ARM_LPAE */
1489
1490 /*
1491@@ -51,6 +56,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1492 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1493 #define pmd_free(mm, pmd) do { } while (0)
1494 #define pud_populate(mm,pmd,pte) BUG()
1495+#define pud_populate_kernel(mm,pmd,pte) BUG()
1496
1497 #endif /* CONFIG_ARM_LPAE */
1498
1499diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
1500index 0f04d84..2be5648 100644
1501--- a/arch/arm/include/asm/thread_info.h
1502+++ b/arch/arm/include/asm/thread_info.h
1503@@ -148,6 +148,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1504 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
1505 #define TIF_SYSCALL_TRACE 8
1506 #define TIF_SYSCALL_AUDIT 9
1507+
1508+/* within 8 bits of TIF_SYSCALL_TRACE
1509+ to meet flexible second operand requirements
1510+*/
1511+#define TIF_GRSEC_SETXID 10
1512+
1513 #define TIF_POLLING_NRFLAG 16
1514 #define TIF_USING_IWMMXT 17
1515 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
1516@@ -163,9 +169,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1517 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
1518 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
1519 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
1520+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
1521
1522 /* Checks for any syscall work in entry-common.S */
1523-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
1524+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
1525+ _TIF_GRSEC_SETXID)
1526
1527 /*
1528 * Change these and you break ASM code in entry-common.S
1529diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1530index 71f6536..602f279 100644
1531--- a/arch/arm/include/asm/uaccess.h
1532+++ b/arch/arm/include/asm/uaccess.h
1533@@ -22,6 +22,8 @@
1534 #define VERIFY_READ 0
1535 #define VERIFY_WRITE 1
1536
1537+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1538+
1539 /*
1540 * The exception table consists of pairs of addresses: the first is the
1541 * address of an instruction that is allowed to fault, and the second is
1542@@ -387,8 +389,23 @@ do { \
1543
1544
1545 #ifdef CONFIG_MMU
1546-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1547-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1548+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1549+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1550+
1551+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1552+{
1553+ if (!__builtin_constant_p(n))
1554+ check_object_size(to, n, false);
1555+ return ___copy_from_user(to, from, n);
1556+}
1557+
1558+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1559+{
1560+ if (!__builtin_constant_p(n))
1561+ check_object_size(from, n, true);
1562+ return ___copy_to_user(to, from, n);
1563+}
1564+
1565 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1566 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1567 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1568@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1569
1570 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1571 {
1572+ if ((long)n < 0)
1573+ return n;
1574+
1575 if (access_ok(VERIFY_READ, from, n))
1576 n = __copy_from_user(to, from, n);
1577 else /* security hole - plug it */
1578@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1579
1580 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1581 {
1582+ if ((long)n < 0)
1583+ return n;
1584+
1585 if (access_ok(VERIFY_WRITE, to, n))
1586 n = __copy_to_user(to, from, n);
1587 return n;
1588diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1589index b57c75e..ed2d6b2 100644
1590--- a/arch/arm/kernel/armksyms.c
1591+++ b/arch/arm/kernel/armksyms.c
1592@@ -94,8 +94,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1593 #ifdef CONFIG_MMU
1594 EXPORT_SYMBOL(copy_page);
1595
1596-EXPORT_SYMBOL(__copy_from_user);
1597-EXPORT_SYMBOL(__copy_to_user);
1598+EXPORT_SYMBOL(___copy_from_user);
1599+EXPORT_SYMBOL(___copy_to_user);
1600 EXPORT_SYMBOL(__clear_user);
1601
1602 EXPORT_SYMBOL(__get_user_1);
1603diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1604index 2b7b017..c380fa2 100644
1605--- a/arch/arm/kernel/process.c
1606+++ b/arch/arm/kernel/process.c
1607@@ -28,7 +28,6 @@
1608 #include <linux/tick.h>
1609 #include <linux/utsname.h>
1610 #include <linux/uaccess.h>
1611-#include <linux/random.h>
1612 #include <linux/hw_breakpoint.h>
1613 #include <linux/cpuidle.h>
1614
1615@@ -275,9 +274,10 @@ void machine_power_off(void)
1616 machine_shutdown();
1617 if (pm_power_off)
1618 pm_power_off();
1619+ BUG();
1620 }
1621
1622-void machine_restart(char *cmd)
1623+__noreturn void machine_restart(char *cmd)
1624 {
1625 machine_shutdown();
1626
1627@@ -519,12 +519,6 @@ unsigned long get_wchan(struct task_struct *p)
1628 return 0;
1629 }
1630
1631-unsigned long arch_randomize_brk(struct mm_struct *mm)
1632-{
1633- unsigned long range_end = mm->brk + 0x02000000;
1634- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1635-}
1636-
1637 #ifdef CONFIG_MMU
1638 /*
1639 * The vectors page is always readable from user space for the
1640diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
1641index 9650c14..ae30cdd 100644
1642--- a/arch/arm/kernel/ptrace.c
1643+++ b/arch/arm/kernel/ptrace.c
1644@@ -906,10 +906,19 @@ long arch_ptrace(struct task_struct *child, long request,
1645 return ret;
1646 }
1647
1648+#ifdef CONFIG_GRKERNSEC_SETXID
1649+extern void gr_delayed_cred_worker(void);
1650+#endif
1651+
1652 asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
1653 {
1654 unsigned long ip;
1655
1656+#ifdef CONFIG_GRKERNSEC_SETXID
1657+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
1658+ gr_delayed_cred_worker();
1659+#endif
1660+
1661 if (why)
1662 audit_syscall_exit(regs);
1663 else
1664diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1665index ebfac78..cbea9c0 100644
1666--- a/arch/arm/kernel/setup.c
1667+++ b/arch/arm/kernel/setup.c
1668@@ -111,13 +111,13 @@ struct processor processor __read_mostly;
1669 struct cpu_tlb_fns cpu_tlb __read_mostly;
1670 #endif
1671 #ifdef MULTI_USER
1672-struct cpu_user_fns cpu_user __read_mostly;
1673+struct cpu_user_fns cpu_user __read_only;
1674 #endif
1675 #ifdef MULTI_CACHE
1676-struct cpu_cache_fns cpu_cache __read_mostly;
1677+struct cpu_cache_fns cpu_cache __read_only;
1678 #endif
1679 #ifdef CONFIG_OUTER_CACHE
1680-struct outer_cache_fns outer_cache __read_mostly;
1681+struct outer_cache_fns outer_cache __read_only;
1682 EXPORT_SYMBOL(outer_cache);
1683 #endif
1684
1685diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1686index 63d402f..db1d714 100644
1687--- a/arch/arm/kernel/traps.c
1688+++ b/arch/arm/kernel/traps.c
1689@@ -264,6 +264,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
1690
1691 static DEFINE_RAW_SPINLOCK(die_lock);
1692
1693+extern void gr_handle_kernel_exploit(void);
1694+
1695 /*
1696 * This function is protected against re-entrancy.
1697 */
1698@@ -296,6 +298,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1699 panic("Fatal exception in interrupt");
1700 if (panic_on_oops)
1701 panic("Fatal exception");
1702+
1703+ gr_handle_kernel_exploit();
1704+
1705 if (ret != NOTIFY_STOP)
1706 do_exit(SIGSEGV);
1707 }
1708diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1709index 66a477a..bee61d3 100644
1710--- a/arch/arm/lib/copy_from_user.S
1711+++ b/arch/arm/lib/copy_from_user.S
1712@@ -16,7 +16,7 @@
1713 /*
1714 * Prototype:
1715 *
1716- * size_t __copy_from_user(void *to, const void *from, size_t n)
1717+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
1718 *
1719 * Purpose:
1720 *
1721@@ -84,11 +84,11 @@
1722
1723 .text
1724
1725-ENTRY(__copy_from_user)
1726+ENTRY(___copy_from_user)
1727
1728 #include "copy_template.S"
1729
1730-ENDPROC(__copy_from_user)
1731+ENDPROC(___copy_from_user)
1732
1733 .pushsection .fixup,"ax"
1734 .align 0
1735diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1736index 6ee2f67..d1cce76 100644
1737--- a/arch/arm/lib/copy_page.S
1738+++ b/arch/arm/lib/copy_page.S
1739@@ -10,6 +10,7 @@
1740 * ASM optimised string functions
1741 */
1742 #include <linux/linkage.h>
1743+#include <linux/const.h>
1744 #include <asm/assembler.h>
1745 #include <asm/asm-offsets.h>
1746 #include <asm/cache.h>
1747diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1748index d066df6..df28194 100644
1749--- a/arch/arm/lib/copy_to_user.S
1750+++ b/arch/arm/lib/copy_to_user.S
1751@@ -16,7 +16,7 @@
1752 /*
1753 * Prototype:
1754 *
1755- * size_t __copy_to_user(void *to, const void *from, size_t n)
1756+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
1757 *
1758 * Purpose:
1759 *
1760@@ -88,11 +88,11 @@
1761 .text
1762
1763 ENTRY(__copy_to_user_std)
1764-WEAK(__copy_to_user)
1765+WEAK(___copy_to_user)
1766
1767 #include "copy_template.S"
1768
1769-ENDPROC(__copy_to_user)
1770+ENDPROC(___copy_to_user)
1771 ENDPROC(__copy_to_user_std)
1772
1773 .pushsection .fixup,"ax"
1774diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1775index 5c908b1..e712687 100644
1776--- a/arch/arm/lib/uaccess.S
1777+++ b/arch/arm/lib/uaccess.S
1778@@ -20,7 +20,7 @@
1779
1780 #define PAGE_SHIFT 12
1781
1782-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1783+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1784 * Purpose : copy a block to user memory from kernel memory
1785 * Params : to - user memory
1786 * : from - kernel memory
1787@@ -40,7 +40,7 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1788 sub r2, r2, ip
1789 b .Lc2u_dest_aligned
1790
1791-ENTRY(__copy_to_user)
1792+ENTRY(___copy_to_user)
1793 stmfd sp!, {r2, r4 - r7, lr}
1794 cmp r2, #4
1795 blt .Lc2u_not_enough
1796@@ -278,14 +278,14 @@ USER( TUSER( strgeb) r3, [r0], #1) @ May fault
1797 ldrgtb r3, [r1], #0
1798 USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1799 b .Lc2u_finished
1800-ENDPROC(__copy_to_user)
1801+ENDPROC(___copy_to_user)
1802
1803 .pushsection .fixup,"ax"
1804 .align 0
1805 9001: ldmfd sp!, {r0, r4 - r7, pc}
1806 .popsection
1807
1808-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1809+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1810 * Purpose : copy a block from user memory to kernel memory
1811 * Params : to - kernel memory
1812 * : from - user memory
1813@@ -304,7 +304,7 @@ USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1814 sub r2, r2, ip
1815 b .Lcfu_dest_aligned
1816
1817-ENTRY(__copy_from_user)
1818+ENTRY(___copy_from_user)
1819 stmfd sp!, {r0, r2, r4 - r7, lr}
1820 cmp r2, #4
1821 blt .Lcfu_not_enough
1822@@ -544,7 +544,7 @@ USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
1823 USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1824 strgtb r3, [r0], #1
1825 b .Lcfu_finished
1826-ENDPROC(__copy_from_user)
1827+ENDPROC(___copy_from_user)
1828
1829 .pushsection .fixup,"ax"
1830 .align 0
1831diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1832index 025f742..8432b08 100644
1833--- a/arch/arm/lib/uaccess_with_memcpy.c
1834+++ b/arch/arm/lib/uaccess_with_memcpy.c
1835@@ -104,7 +104,7 @@ out:
1836 }
1837
1838 unsigned long
1839-__copy_to_user(void __user *to, const void *from, unsigned long n)
1840+___copy_to_user(void __user *to, const void *from, unsigned long n)
1841 {
1842 /*
1843 * This test is stubbed out of the main function above to keep
1844diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
1845index 518091c..eae9a76 100644
1846--- a/arch/arm/mach-omap2/board-n8x0.c
1847+++ b/arch/arm/mach-omap2/board-n8x0.c
1848@@ -596,7 +596,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
1849 }
1850 #endif
1851
1852-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
1853+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
1854 .late_init = n8x0_menelaus_late_init,
1855 };
1856
1857diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1858index 5bb4835..4760f68 100644
1859--- a/arch/arm/mm/fault.c
1860+++ b/arch/arm/mm/fault.c
1861@@ -174,6 +174,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1862 }
1863 #endif
1864
1865+#ifdef CONFIG_PAX_PAGEEXEC
1866+ if (fsr & FSR_LNX_PF) {
1867+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1868+ do_group_exit(SIGKILL);
1869+ }
1870+#endif
1871+
1872 tsk->thread.address = addr;
1873 tsk->thread.error_code = fsr;
1874 tsk->thread.trap_no = 14;
1875@@ -397,6 +404,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1876 }
1877 #endif /* CONFIG_MMU */
1878
1879+#ifdef CONFIG_PAX_PAGEEXEC
1880+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1881+{
1882+ long i;
1883+
1884+ printk(KERN_ERR "PAX: bytes at PC: ");
1885+ for (i = 0; i < 20; i++) {
1886+ unsigned char c;
1887+ if (get_user(c, (__force unsigned char __user *)pc+i))
1888+ printk(KERN_CONT "?? ");
1889+ else
1890+ printk(KERN_CONT "%02x ", c);
1891+ }
1892+ printk("\n");
1893+
1894+ printk(KERN_ERR "PAX: bytes at SP-4: ");
1895+ for (i = -1; i < 20; i++) {
1896+ unsigned long c;
1897+ if (get_user(c, (__force unsigned long __user *)sp+i))
1898+ printk(KERN_CONT "???????? ");
1899+ else
1900+ printk(KERN_CONT "%08lx ", c);
1901+ }
1902+ printk("\n");
1903+}
1904+#endif
1905+
1906 /*
1907 * First Level Translation Fault Handler
1908 *
1909@@ -577,6 +611,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1910 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1911 struct siginfo info;
1912
1913+#ifdef CONFIG_PAX_REFCOUNT
1914+ if (fsr_fs(ifsr) == 2) {
1915+ unsigned int bkpt;
1916+
1917+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1918+ current->thread.error_code = ifsr;
1919+ current->thread.trap_no = 0;
1920+ pax_report_refcount_overflow(regs);
1921+ fixup_exception(regs);
1922+ return;
1923+ }
1924+ }
1925+#endif
1926+
1927 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1928 return;
1929
1930diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1931index ce8cb19..3ec539d 100644
1932--- a/arch/arm/mm/mmap.c
1933+++ b/arch/arm/mm/mmap.c
1934@@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1935 if (len > TASK_SIZE)
1936 return -ENOMEM;
1937
1938+#ifdef CONFIG_PAX_RANDMMAP
1939+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1940+#endif
1941+
1942 if (addr) {
1943 if (do_align)
1944 addr = COLOUR_ALIGN(addr, pgoff);
1945@@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1946 addr = PAGE_ALIGN(addr);
1947
1948 vma = find_vma(mm, addr);
1949- if (TASK_SIZE - len >= addr &&
1950- (!vma || addr + len <= vma->vm_start))
1951+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1952 return addr;
1953 }
1954 if (len > mm->cached_hole_size) {
1955- start_addr = addr = mm->free_area_cache;
1956+ start_addr = addr = mm->free_area_cache;
1957 } else {
1958- start_addr = addr = mm->mmap_base;
1959- mm->cached_hole_size = 0;
1960+ start_addr = addr = mm->mmap_base;
1961+ mm->cached_hole_size = 0;
1962 }
1963
1964 full_search:
1965@@ -124,14 +127,14 @@ full_search:
1966 * Start a new search - just in case we missed
1967 * some holes.
1968 */
1969- if (start_addr != TASK_UNMAPPED_BASE) {
1970- start_addr = addr = TASK_UNMAPPED_BASE;
1971+ if (start_addr != mm->mmap_base) {
1972+ start_addr = addr = mm->mmap_base;
1973 mm->cached_hole_size = 0;
1974 goto full_search;
1975 }
1976 return -ENOMEM;
1977 }
1978- if (!vma || addr + len <= vma->vm_start) {
1979+ if (check_heap_stack_gap(vma, addr, len)) {
1980 /*
1981 * Remember the place where we stopped the search:
1982 */
1983@@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1984
1985 if (mmap_is_legacy()) {
1986 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
1987+
1988+#ifdef CONFIG_PAX_RANDMMAP
1989+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1990+ mm->mmap_base += mm->delta_mmap;
1991+#endif
1992+
1993 mm->get_unmapped_area = arch_get_unmapped_area;
1994 mm->unmap_area = arch_unmap_area;
1995 } else {
1996 mm->mmap_base = mmap_base(random_factor);
1997+
1998+#ifdef CONFIG_PAX_RANDMMAP
1999+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2000+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2001+#endif
2002+
2003 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2004 mm->unmap_area = arch_unmap_area_topdown;
2005 }
2006diff --git a/arch/arm/plat-orion/include/plat/addr-map.h b/arch/arm/plat-orion/include/plat/addr-map.h
2007index fd556f7..af2e7d2 100644
2008--- a/arch/arm/plat-orion/include/plat/addr-map.h
2009+++ b/arch/arm/plat-orion/include/plat/addr-map.h
2010@@ -26,7 +26,7 @@ struct orion_addr_map_cfg {
2011 value in bridge_virt_base */
2012 void __iomem *(*win_cfg_base) (const struct orion_addr_map_cfg *cfg,
2013 const int win);
2014-};
2015+} __no_const;
2016
2017 /*
2018 * Information needed to setup one address mapping.
2019diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
2020index 71a6827..e7fbc23 100644
2021--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
2022+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
2023@@ -43,7 +43,7 @@ struct samsung_dma_ops {
2024 int (*started)(unsigned ch);
2025 int (*flush)(unsigned ch);
2026 int (*stop)(unsigned ch);
2027-};
2028+} __no_const;
2029
2030 extern void *samsung_dmadev_get_ops(void);
2031 extern void *s3c_dma_get_ops(void);
2032diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
2033index 5f28cae..3d23723 100644
2034--- a/arch/arm/plat-samsung/include/plat/ehci.h
2035+++ b/arch/arm/plat-samsung/include/plat/ehci.h
2036@@ -14,7 +14,7 @@
2037 struct s5p_ehci_platdata {
2038 int (*phy_init)(struct platform_device *pdev, int type);
2039 int (*phy_exit)(struct platform_device *pdev, int type);
2040-};
2041+} __no_const;
2042
2043 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
2044
2045diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
2046index c3a58a1..78fbf54 100644
2047--- a/arch/avr32/include/asm/cache.h
2048+++ b/arch/avr32/include/asm/cache.h
2049@@ -1,8 +1,10 @@
2050 #ifndef __ASM_AVR32_CACHE_H
2051 #define __ASM_AVR32_CACHE_H
2052
2053+#include <linux/const.h>
2054+
2055 #define L1_CACHE_SHIFT 5
2056-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2057+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2058
2059 /*
2060 * Memory returned by kmalloc() may be used for DMA, so we must make
2061diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
2062index 3b3159b..425ea94 100644
2063--- a/arch/avr32/include/asm/elf.h
2064+++ b/arch/avr32/include/asm/elf.h
2065@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
2066 the loader. We need to make sure that it is out of the way of the program
2067 that it will "exec", and that there is sufficient room for the brk. */
2068
2069-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2070+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2071
2072+#ifdef CONFIG_PAX_ASLR
2073+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
2074+
2075+#define PAX_DELTA_MMAP_LEN 15
2076+#define PAX_DELTA_STACK_LEN 15
2077+#endif
2078
2079 /* This yields a mask that user programs can use to figure out what
2080 instruction set this CPU supports. This could be done in user space,
2081diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
2082index b7f5c68..556135c 100644
2083--- a/arch/avr32/include/asm/kmap_types.h
2084+++ b/arch/avr32/include/asm/kmap_types.h
2085@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
2086 D(11) KM_IRQ1,
2087 D(12) KM_SOFTIRQ0,
2088 D(13) KM_SOFTIRQ1,
2089-D(14) KM_TYPE_NR
2090+D(14) KM_CLEARPAGE,
2091+D(15) KM_TYPE_NR
2092 };
2093
2094 #undef D
2095diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
2096index f7040a1..db9f300 100644
2097--- a/arch/avr32/mm/fault.c
2098+++ b/arch/avr32/mm/fault.c
2099@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
2100
2101 int exception_trace = 1;
2102
2103+#ifdef CONFIG_PAX_PAGEEXEC
2104+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2105+{
2106+ unsigned long i;
2107+
2108+ printk(KERN_ERR "PAX: bytes at PC: ");
2109+ for (i = 0; i < 20; i++) {
2110+ unsigned char c;
2111+ if (get_user(c, (unsigned char *)pc+i))
2112+ printk(KERN_CONT "???????? ");
2113+ else
2114+ printk(KERN_CONT "%02x ", c);
2115+ }
2116+ printk("\n");
2117+}
2118+#endif
2119+
2120 /*
2121 * This routine handles page faults. It determines the address and the
2122 * problem, and then passes it off to one of the appropriate routines.
2123@@ -156,6 +173,16 @@ bad_area:
2124 up_read(&mm->mmap_sem);
2125
2126 if (user_mode(regs)) {
2127+
2128+#ifdef CONFIG_PAX_PAGEEXEC
2129+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2130+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
2131+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
2132+ do_group_exit(SIGKILL);
2133+ }
2134+ }
2135+#endif
2136+
2137 if (exception_trace && printk_ratelimit())
2138 printk("%s%s[%d]: segfault at %08lx pc %08lx "
2139 "sp %08lx ecr %lu\n",
2140diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
2141index 568885a..f8008df 100644
2142--- a/arch/blackfin/include/asm/cache.h
2143+++ b/arch/blackfin/include/asm/cache.h
2144@@ -7,6 +7,7 @@
2145 #ifndef __ARCH_BLACKFIN_CACHE_H
2146 #define __ARCH_BLACKFIN_CACHE_H
2147
2148+#include <linux/const.h>
2149 #include <linux/linkage.h> /* for asmlinkage */
2150
2151 /*
2152@@ -14,7 +15,7 @@
2153 * Blackfin loads 32 bytes for cache
2154 */
2155 #define L1_CACHE_SHIFT 5
2156-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2157+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2158 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2159
2160 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2161diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
2162index aea2718..3639a60 100644
2163--- a/arch/cris/include/arch-v10/arch/cache.h
2164+++ b/arch/cris/include/arch-v10/arch/cache.h
2165@@ -1,8 +1,9 @@
2166 #ifndef _ASM_ARCH_CACHE_H
2167 #define _ASM_ARCH_CACHE_H
2168
2169+#include <linux/const.h>
2170 /* Etrax 100LX have 32-byte cache-lines. */
2171-#define L1_CACHE_BYTES 32
2172 #define L1_CACHE_SHIFT 5
2173+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2174
2175 #endif /* _ASM_ARCH_CACHE_H */
2176diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2177index 1de779f..336fad3 100644
2178--- a/arch/cris/include/arch-v32/arch/cache.h
2179+++ b/arch/cris/include/arch-v32/arch/cache.h
2180@@ -1,11 +1,12 @@
2181 #ifndef _ASM_CRIS_ARCH_CACHE_H
2182 #define _ASM_CRIS_ARCH_CACHE_H
2183
2184+#include <linux/const.h>
2185 #include <arch/hwregs/dma.h>
2186
2187 /* A cache-line is 32 bytes. */
2188-#define L1_CACHE_BYTES 32
2189 #define L1_CACHE_SHIFT 5
2190+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2191
2192 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
2193
2194diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2195index b86329d..6709906 100644
2196--- a/arch/frv/include/asm/atomic.h
2197+++ b/arch/frv/include/asm/atomic.h
2198@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
2199 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2200 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2201
2202+#define atomic64_read_unchecked(v) atomic64_read(v)
2203+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2204+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2205+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2206+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2207+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2208+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2209+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2210+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2211+
2212 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2213 {
2214 int c, old;
2215diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2216index 2797163..c2a401d 100644
2217--- a/arch/frv/include/asm/cache.h
2218+++ b/arch/frv/include/asm/cache.h
2219@@ -12,10 +12,11 @@
2220 #ifndef __ASM_CACHE_H
2221 #define __ASM_CACHE_H
2222
2223+#include <linux/const.h>
2224
2225 /* bytes per L1 cache line */
2226 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2227-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2228+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2229
2230 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2231 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2232diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2233index f8e16b2..c73ff79 100644
2234--- a/arch/frv/include/asm/kmap_types.h
2235+++ b/arch/frv/include/asm/kmap_types.h
2236@@ -23,6 +23,7 @@ enum km_type {
2237 KM_IRQ1,
2238 KM_SOFTIRQ0,
2239 KM_SOFTIRQ1,
2240+ KM_CLEARPAGE,
2241 KM_TYPE_NR
2242 };
2243
2244diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2245index 385fd30..6c3d97e 100644
2246--- a/arch/frv/mm/elf-fdpic.c
2247+++ b/arch/frv/mm/elf-fdpic.c
2248@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2249 if (addr) {
2250 addr = PAGE_ALIGN(addr);
2251 vma = find_vma(current->mm, addr);
2252- if (TASK_SIZE - len >= addr &&
2253- (!vma || addr + len <= vma->vm_start))
2254+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2255 goto success;
2256 }
2257
2258@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2259 for (; vma; vma = vma->vm_next) {
2260 if (addr > limit)
2261 break;
2262- if (addr + len <= vma->vm_start)
2263+ if (check_heap_stack_gap(vma, addr, len))
2264 goto success;
2265 addr = vma->vm_end;
2266 }
2267@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2268 for (; vma; vma = vma->vm_next) {
2269 if (addr > limit)
2270 break;
2271- if (addr + len <= vma->vm_start)
2272+ if (check_heap_stack_gap(vma, addr, len))
2273 goto success;
2274 addr = vma->vm_end;
2275 }
2276diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2277index c635028..6d9445a 100644
2278--- a/arch/h8300/include/asm/cache.h
2279+++ b/arch/h8300/include/asm/cache.h
2280@@ -1,8 +1,10 @@
2281 #ifndef __ARCH_H8300_CACHE_H
2282 #define __ARCH_H8300_CACHE_H
2283
2284+#include <linux/const.h>
2285+
2286 /* bytes per L1 cache line */
2287-#define L1_CACHE_BYTES 4
2288+#define L1_CACHE_BYTES _AC(4,UL)
2289
2290 /* m68k-elf-gcc 2.95.2 doesn't like these */
2291
2292diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2293index 0f01de2..d37d309 100644
2294--- a/arch/hexagon/include/asm/cache.h
2295+++ b/arch/hexagon/include/asm/cache.h
2296@@ -21,9 +21,11 @@
2297 #ifndef __ASM_CACHE_H
2298 #define __ASM_CACHE_H
2299
2300+#include <linux/const.h>
2301+
2302 /* Bytes per L1 cache line */
2303-#define L1_CACHE_SHIFT (5)
2304-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2305+#define L1_CACHE_SHIFT 5
2306+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2307
2308 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2309 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2310diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2311index 7d91166..88ab87e 100644
2312--- a/arch/ia64/include/asm/atomic.h
2313+++ b/arch/ia64/include/asm/atomic.h
2314@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2315 #define atomic64_inc(v) atomic64_add(1, (v))
2316 #define atomic64_dec(v) atomic64_sub(1, (v))
2317
2318+#define atomic64_read_unchecked(v) atomic64_read(v)
2319+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2320+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2321+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2322+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2323+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2324+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2325+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2326+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2327+
2328 /* Atomic operations are already serializing */
2329 #define smp_mb__before_atomic_dec() barrier()
2330 #define smp_mb__after_atomic_dec() barrier()
2331diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2332index 988254a..e1ee885 100644
2333--- a/arch/ia64/include/asm/cache.h
2334+++ b/arch/ia64/include/asm/cache.h
2335@@ -1,6 +1,7 @@
2336 #ifndef _ASM_IA64_CACHE_H
2337 #define _ASM_IA64_CACHE_H
2338
2339+#include <linux/const.h>
2340
2341 /*
2342 * Copyright (C) 1998-2000 Hewlett-Packard Co
2343@@ -9,7 +10,7 @@
2344
2345 /* Bytes per L1 (data) cache line. */
2346 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2347-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2348+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2349
2350 #ifdef CONFIG_SMP
2351 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2352diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2353index b5298eb..67c6e62 100644
2354--- a/arch/ia64/include/asm/elf.h
2355+++ b/arch/ia64/include/asm/elf.h
2356@@ -42,6 +42,13 @@
2357 */
2358 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2359
2360+#ifdef CONFIG_PAX_ASLR
2361+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2362+
2363+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2364+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2365+#endif
2366+
2367 #define PT_IA_64_UNWIND 0x70000001
2368
2369 /* IA-64 relocations: */
2370diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
2371index 96a8d92..617a1cf 100644
2372--- a/arch/ia64/include/asm/pgalloc.h
2373+++ b/arch/ia64/include/asm/pgalloc.h
2374@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2375 pgd_val(*pgd_entry) = __pa(pud);
2376 }
2377
2378+static inline void
2379+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2380+{
2381+ pgd_populate(mm, pgd_entry, pud);
2382+}
2383+
2384 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
2385 {
2386 return quicklist_alloc(0, GFP_KERNEL, NULL);
2387@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2388 pud_val(*pud_entry) = __pa(pmd);
2389 }
2390
2391+static inline void
2392+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2393+{
2394+ pud_populate(mm, pud_entry, pmd);
2395+}
2396+
2397 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
2398 {
2399 return quicklist_alloc(0, GFP_KERNEL, NULL);
2400diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2401index 815810c..d60bd4c 100644
2402--- a/arch/ia64/include/asm/pgtable.h
2403+++ b/arch/ia64/include/asm/pgtable.h
2404@@ -12,7 +12,7 @@
2405 * David Mosberger-Tang <davidm@hpl.hp.com>
2406 */
2407
2408-
2409+#include <linux/const.h>
2410 #include <asm/mman.h>
2411 #include <asm/page.h>
2412 #include <asm/processor.h>
2413@@ -142,6 +142,17 @@
2414 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2415 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2416 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2417+
2418+#ifdef CONFIG_PAX_PAGEEXEC
2419+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2420+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2421+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2422+#else
2423+# define PAGE_SHARED_NOEXEC PAGE_SHARED
2424+# define PAGE_READONLY_NOEXEC PAGE_READONLY
2425+# define PAGE_COPY_NOEXEC PAGE_COPY
2426+#endif
2427+
2428 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2429 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2430 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2431diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2432index 54ff557..70c88b7 100644
2433--- a/arch/ia64/include/asm/spinlock.h
2434+++ b/arch/ia64/include/asm/spinlock.h
2435@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2436 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2437
2438 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2439- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2440+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2441 }
2442
2443 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
2444diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2445index 449c8c0..432a3d2 100644
2446--- a/arch/ia64/include/asm/uaccess.h
2447+++ b/arch/ia64/include/asm/uaccess.h
2448@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2449 const void *__cu_from = (from); \
2450 long __cu_len = (n); \
2451 \
2452- if (__access_ok(__cu_to, __cu_len, get_fs())) \
2453+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2454 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2455 __cu_len; \
2456 })
2457@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2458 long __cu_len = (n); \
2459 \
2460 __chk_user_ptr(__cu_from); \
2461- if (__access_ok(__cu_from, __cu_len, get_fs())) \
2462+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2463 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2464 __cu_len; \
2465 })
2466diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2467index 24603be..948052d 100644
2468--- a/arch/ia64/kernel/module.c
2469+++ b/arch/ia64/kernel/module.c
2470@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2471 void
2472 module_free (struct module *mod, void *module_region)
2473 {
2474- if (mod && mod->arch.init_unw_table &&
2475- module_region == mod->module_init) {
2476+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2477 unw_remove_unwind_table(mod->arch.init_unw_table);
2478 mod->arch.init_unw_table = NULL;
2479 }
2480@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2481 }
2482
2483 static inline int
2484+in_init_rx (const struct module *mod, uint64_t addr)
2485+{
2486+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2487+}
2488+
2489+static inline int
2490+in_init_rw (const struct module *mod, uint64_t addr)
2491+{
2492+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2493+}
2494+
2495+static inline int
2496 in_init (const struct module *mod, uint64_t addr)
2497 {
2498- return addr - (uint64_t) mod->module_init < mod->init_size;
2499+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2500+}
2501+
2502+static inline int
2503+in_core_rx (const struct module *mod, uint64_t addr)
2504+{
2505+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2506+}
2507+
2508+static inline int
2509+in_core_rw (const struct module *mod, uint64_t addr)
2510+{
2511+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2512 }
2513
2514 static inline int
2515 in_core (const struct module *mod, uint64_t addr)
2516 {
2517- return addr - (uint64_t) mod->module_core < mod->core_size;
2518+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2519 }
2520
2521 static inline int
2522@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2523 break;
2524
2525 case RV_BDREL:
2526- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2527+ if (in_init_rx(mod, val))
2528+ val -= (uint64_t) mod->module_init_rx;
2529+ else if (in_init_rw(mod, val))
2530+ val -= (uint64_t) mod->module_init_rw;
2531+ else if (in_core_rx(mod, val))
2532+ val -= (uint64_t) mod->module_core_rx;
2533+ else if (in_core_rw(mod, val))
2534+ val -= (uint64_t) mod->module_core_rw;
2535 break;
2536
2537 case RV_LTV:
2538@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2539 * addresses have been selected...
2540 */
2541 uint64_t gp;
2542- if (mod->core_size > MAX_LTOFF)
2543+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2544 /*
2545 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2546 * at the end of the module.
2547 */
2548- gp = mod->core_size - MAX_LTOFF / 2;
2549+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2550 else
2551- gp = mod->core_size / 2;
2552- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2553+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2554+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2555 mod->arch.gp = gp;
2556 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2557 }
2558diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2559index 609d500..7dde2a8 100644
2560--- a/arch/ia64/kernel/sys_ia64.c
2561+++ b/arch/ia64/kernel/sys_ia64.c
2562@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2563 if (REGION_NUMBER(addr) == RGN_HPAGE)
2564 addr = 0;
2565 #endif
2566+
2567+#ifdef CONFIG_PAX_RANDMMAP
2568+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2569+ addr = mm->free_area_cache;
2570+ else
2571+#endif
2572+
2573 if (!addr)
2574 addr = mm->free_area_cache;
2575
2576@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2577 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2578 /* At this point: (!vma || addr < vma->vm_end). */
2579 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2580- if (start_addr != TASK_UNMAPPED_BASE) {
2581+ if (start_addr != mm->mmap_base) {
2582 /* Start a new search --- just in case we missed some holes. */
2583- addr = TASK_UNMAPPED_BASE;
2584+ addr = mm->mmap_base;
2585 goto full_search;
2586 }
2587 return -ENOMEM;
2588 }
2589- if (!vma || addr + len <= vma->vm_start) {
2590+ if (check_heap_stack_gap(vma, addr, len)) {
2591 /* Remember the address where we stopped this search: */
2592 mm->free_area_cache = addr + len;
2593 return addr;
2594diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2595index 0ccb28f..8992469 100644
2596--- a/arch/ia64/kernel/vmlinux.lds.S
2597+++ b/arch/ia64/kernel/vmlinux.lds.S
2598@@ -198,7 +198,7 @@ SECTIONS {
2599 /* Per-cpu data: */
2600 . = ALIGN(PERCPU_PAGE_SIZE);
2601 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2602- __phys_per_cpu_start = __per_cpu_load;
2603+ __phys_per_cpu_start = per_cpu_load;
2604 /*
2605 * ensure percpu data fits
2606 * into percpu page size
2607diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2608index 02d29c2..ea893df 100644
2609--- a/arch/ia64/mm/fault.c
2610+++ b/arch/ia64/mm/fault.c
2611@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
2612 return pte_present(pte);
2613 }
2614
2615+#ifdef CONFIG_PAX_PAGEEXEC
2616+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2617+{
2618+ unsigned long i;
2619+
2620+ printk(KERN_ERR "PAX: bytes at PC: ");
2621+ for (i = 0; i < 8; i++) {
2622+ unsigned int c;
2623+ if (get_user(c, (unsigned int *)pc+i))
2624+ printk(KERN_CONT "???????? ");
2625+ else
2626+ printk(KERN_CONT "%08x ", c);
2627+ }
2628+ printk("\n");
2629+}
2630+#endif
2631+
2632 void __kprobes
2633 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2634 {
2635@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2636 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2637 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2638
2639- if ((vma->vm_flags & mask) != mask)
2640+ if ((vma->vm_flags & mask) != mask) {
2641+
2642+#ifdef CONFIG_PAX_PAGEEXEC
2643+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2644+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2645+ goto bad_area;
2646+
2647+ up_read(&mm->mmap_sem);
2648+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2649+ do_group_exit(SIGKILL);
2650+ }
2651+#endif
2652+
2653 goto bad_area;
2654
2655+ }
2656+
2657 /*
2658 * If for any reason at all we couldn't handle the fault, make
2659 * sure we exit gracefully rather than endlessly redo the
2660diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2661index 5ca674b..e0e1b70 100644
2662--- a/arch/ia64/mm/hugetlbpage.c
2663+++ b/arch/ia64/mm/hugetlbpage.c
2664@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2665 /* At this point: (!vmm || addr < vmm->vm_end). */
2666 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2667 return -ENOMEM;
2668- if (!vmm || (addr + len) <= vmm->vm_start)
2669+ if (check_heap_stack_gap(vmm, addr, len))
2670 return addr;
2671 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2672 }
2673diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2674index 0eab454..bd794f2 100644
2675--- a/arch/ia64/mm/init.c
2676+++ b/arch/ia64/mm/init.c
2677@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
2678 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2679 vma->vm_end = vma->vm_start + PAGE_SIZE;
2680 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2681+
2682+#ifdef CONFIG_PAX_PAGEEXEC
2683+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2684+ vma->vm_flags &= ~VM_EXEC;
2685+
2686+#ifdef CONFIG_PAX_MPROTECT
2687+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
2688+ vma->vm_flags &= ~VM_MAYEXEC;
2689+#endif
2690+
2691+ }
2692+#endif
2693+
2694 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2695 down_write(&current->mm->mmap_sem);
2696 if (insert_vm_struct(current->mm, vma)) {
2697diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2698index 40b3ee9..8c2c112 100644
2699--- a/arch/m32r/include/asm/cache.h
2700+++ b/arch/m32r/include/asm/cache.h
2701@@ -1,8 +1,10 @@
2702 #ifndef _ASM_M32R_CACHE_H
2703 #define _ASM_M32R_CACHE_H
2704
2705+#include <linux/const.h>
2706+
2707 /* L1 cache line size */
2708 #define L1_CACHE_SHIFT 4
2709-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2710+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2711
2712 #endif /* _ASM_M32R_CACHE_H */
2713diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2714index 82abd15..d95ae5d 100644
2715--- a/arch/m32r/lib/usercopy.c
2716+++ b/arch/m32r/lib/usercopy.c
2717@@ -14,6 +14,9 @@
2718 unsigned long
2719 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2720 {
2721+ if ((long)n < 0)
2722+ return n;
2723+
2724 prefetch(from);
2725 if (access_ok(VERIFY_WRITE, to, n))
2726 __copy_user(to,from,n);
2727@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2728 unsigned long
2729 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2730 {
2731+ if ((long)n < 0)
2732+ return n;
2733+
2734 prefetchw(to);
2735 if (access_ok(VERIFY_READ, from, n))
2736 __copy_user_zeroing(to,from,n);
2737diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2738index 0395c51..5f26031 100644
2739--- a/arch/m68k/include/asm/cache.h
2740+++ b/arch/m68k/include/asm/cache.h
2741@@ -4,9 +4,11 @@
2742 #ifndef __ARCH_M68K_CACHE_H
2743 #define __ARCH_M68K_CACHE_H
2744
2745+#include <linux/const.h>
2746+
2747 /* bytes per L1 cache line */
2748 #define L1_CACHE_SHIFT 4
2749-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2750+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2751
2752 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2753
2754diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2755index 4efe96a..60e8699 100644
2756--- a/arch/microblaze/include/asm/cache.h
2757+++ b/arch/microblaze/include/asm/cache.h
2758@@ -13,11 +13,12 @@
2759 #ifndef _ASM_MICROBLAZE_CACHE_H
2760 #define _ASM_MICROBLAZE_CACHE_H
2761
2762+#include <linux/const.h>
2763 #include <asm/registers.h>
2764
2765 #define L1_CACHE_SHIFT 5
2766 /* word-granular cache in microblaze */
2767-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2768+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2769
2770 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2771
2772diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2773index 3f4c5cb..3439c6e 100644
2774--- a/arch/mips/include/asm/atomic.h
2775+++ b/arch/mips/include/asm/atomic.h
2776@@ -21,6 +21,10 @@
2777 #include <asm/cmpxchg.h>
2778 #include <asm/war.h>
2779
2780+#ifdef CONFIG_GENERIC_ATOMIC64
2781+#include <asm-generic/atomic64.h>
2782+#endif
2783+
2784 #define ATOMIC_INIT(i) { (i) }
2785
2786 /*
2787@@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2788 */
2789 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2790
2791+#define atomic64_read_unchecked(v) atomic64_read(v)
2792+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2793+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2794+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2795+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2796+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2797+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2798+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2799+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2800+
2801 #endif /* CONFIG_64BIT */
2802
2803 /*
2804diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2805index b4db69f..8f3b093 100644
2806--- a/arch/mips/include/asm/cache.h
2807+++ b/arch/mips/include/asm/cache.h
2808@@ -9,10 +9,11 @@
2809 #ifndef _ASM_CACHE_H
2810 #define _ASM_CACHE_H
2811
2812+#include <linux/const.h>
2813 #include <kmalloc.h>
2814
2815 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2816-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2817+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2818
2819 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2820 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2821diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2822index 455c0ac..ad65fbe 100644
2823--- a/arch/mips/include/asm/elf.h
2824+++ b/arch/mips/include/asm/elf.h
2825@@ -372,13 +372,16 @@ extern const char *__elf_platform;
2826 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2827 #endif
2828
2829+#ifdef CONFIG_PAX_ASLR
2830+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2831+
2832+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2833+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2834+#endif
2835+
2836 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2837 struct linux_binprm;
2838 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2839 int uses_interp);
2840
2841-struct mm_struct;
2842-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2843-#define arch_randomize_brk arch_randomize_brk
2844-
2845 #endif /* _ASM_ELF_H */
2846diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
2847index c1f6afa..38cc6e9 100644
2848--- a/arch/mips/include/asm/exec.h
2849+++ b/arch/mips/include/asm/exec.h
2850@@ -12,6 +12,6 @@
2851 #ifndef _ASM_EXEC_H
2852 #define _ASM_EXEC_H
2853
2854-extern unsigned long arch_align_stack(unsigned long sp);
2855+#define arch_align_stack(x) ((x) & ~0xfUL)
2856
2857 #endif /* _ASM_EXEC_H */
2858diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2859index da9bd7d..91aa7ab 100644
2860--- a/arch/mips/include/asm/page.h
2861+++ b/arch/mips/include/asm/page.h
2862@@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2863 #ifdef CONFIG_CPU_MIPS32
2864 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2865 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2866- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2867+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2868 #else
2869 typedef struct { unsigned long long pte; } pte_t;
2870 #define pte_val(x) ((x).pte)
2871diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
2872index 881d18b..cea38bc 100644
2873--- a/arch/mips/include/asm/pgalloc.h
2874+++ b/arch/mips/include/asm/pgalloc.h
2875@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2876 {
2877 set_pud(pud, __pud((unsigned long)pmd));
2878 }
2879+
2880+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2881+{
2882+ pud_populate(mm, pud, pmd);
2883+}
2884 #endif
2885
2886 /*
2887diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
2888index 0d85d8e..ec71487 100644
2889--- a/arch/mips/include/asm/thread_info.h
2890+++ b/arch/mips/include/asm/thread_info.h
2891@@ -123,6 +123,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
2892 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
2893 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
2894 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
2895+/* li takes a 32bit immediate */
2896+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
2897 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
2898
2899 #ifdef CONFIG_MIPS32_O32
2900@@ -146,15 +148,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
2901 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
2902 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
2903 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
2904+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
2905+
2906+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2907
2908 /* work to do in syscall_trace_leave() */
2909-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
2910+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2911
2912 /* work to do on interrupt/exception return */
2913 #define _TIF_WORK_MASK (0x0000ffef & \
2914 ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
2915 /* work to do on any return to u-space */
2916-#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
2917+#define _TIF_ALLWORK_MASK ((0x8000ffff & ~_TIF_SECCOMP) | _TIF_GRSEC_SETXID)
2918
2919 #endif /* __KERNEL__ */
2920
2921diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2922index 9fdd8bc..4bd7f1a 100644
2923--- a/arch/mips/kernel/binfmt_elfn32.c
2924+++ b/arch/mips/kernel/binfmt_elfn32.c
2925@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2926 #undef ELF_ET_DYN_BASE
2927 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2928
2929+#ifdef CONFIG_PAX_ASLR
2930+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2931+
2932+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2933+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2934+#endif
2935+
2936 #include <asm/processor.h>
2937 #include <linux/module.h>
2938 #include <linux/elfcore.h>
2939diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2940index ff44823..97f8906 100644
2941--- a/arch/mips/kernel/binfmt_elfo32.c
2942+++ b/arch/mips/kernel/binfmt_elfo32.c
2943@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2944 #undef ELF_ET_DYN_BASE
2945 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2946
2947+#ifdef CONFIG_PAX_ASLR
2948+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2949+
2950+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2951+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2952+#endif
2953+
2954 #include <asm/processor.h>
2955
2956 /*
2957diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2958index e9a5fd7..378809a 100644
2959--- a/arch/mips/kernel/process.c
2960+++ b/arch/mips/kernel/process.c
2961@@ -480,15 +480,3 @@ unsigned long get_wchan(struct task_struct *task)
2962 out:
2963 return pc;
2964 }
2965-
2966-/*
2967- * Don't forget that the stack pointer must be aligned on a 8 bytes
2968- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2969- */
2970-unsigned long arch_align_stack(unsigned long sp)
2971-{
2972- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2973- sp -= get_random_int() & ~PAGE_MASK;
2974-
2975- return sp & ALMASK;
2976-}
2977diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
2978index 7c24c29..e2f1981 100644
2979--- a/arch/mips/kernel/ptrace.c
2980+++ b/arch/mips/kernel/ptrace.c
2981@@ -528,6 +528,10 @@ static inline int audit_arch(void)
2982 return arch;
2983 }
2984
2985+#ifdef CONFIG_GRKERNSEC_SETXID
2986+extern void gr_delayed_cred_worker(void);
2987+#endif
2988+
2989 /*
2990 * Notification of system call entry/exit
2991 * - triggered by current->work.syscall_trace
2992@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
2993 /* do the secure computing check first */
2994 secure_computing(regs->regs[2]);
2995
2996+#ifdef CONFIG_GRKERNSEC_SETXID
2997+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2998+ gr_delayed_cred_worker();
2999+#endif
3000+
3001 if (!(current->ptrace & PT_PTRACED))
3002 goto out;
3003
3004diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
3005index a632bc1..0b77c7c 100644
3006--- a/arch/mips/kernel/scall32-o32.S
3007+++ b/arch/mips/kernel/scall32-o32.S
3008@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3009
3010 stack_done:
3011 lw t0, TI_FLAGS($28) # syscall tracing enabled?
3012- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3013+ li t1, _TIF_SYSCALL_WORK
3014 and t0, t1
3015 bnez t0, syscall_trace_entry # -> yes
3016
3017diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
3018index 3b5a5e9..e1ee86d 100644
3019--- a/arch/mips/kernel/scall64-64.S
3020+++ b/arch/mips/kernel/scall64-64.S
3021@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
3022
3023 sd a3, PT_R26(sp) # save a3 for syscall restarting
3024
3025- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3026+ li t1, _TIF_SYSCALL_WORK
3027 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3028 and t0, t1, t0
3029 bnez t0, syscall_trace_entry
3030diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
3031index 6be6f70..1859577 100644
3032--- a/arch/mips/kernel/scall64-n32.S
3033+++ b/arch/mips/kernel/scall64-n32.S
3034@@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
3035
3036 sd a3, PT_R26(sp) # save a3 for syscall restarting
3037
3038- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3039+ li t1, _TIF_SYSCALL_WORK
3040 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3041 and t0, t1, t0
3042 bnez t0, n32_syscall_trace_entry
3043diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
3044index 5422855..74e63a3 100644
3045--- a/arch/mips/kernel/scall64-o32.S
3046+++ b/arch/mips/kernel/scall64-o32.S
3047@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3048 PTR 4b, bad_stack
3049 .previous
3050
3051- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3052+ li t1, _TIF_SYSCALL_WORK
3053 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3054 and t0, t1, t0
3055 bnez t0, trace_a_syscall
3056diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
3057index c14f6df..537e729 100644
3058--- a/arch/mips/mm/fault.c
3059+++ b/arch/mips/mm/fault.c
3060@@ -27,6 +27,23 @@
3061 #include <asm/highmem.h> /* For VMALLOC_END */
3062 #include <linux/kdebug.h>
3063
3064+#ifdef CONFIG_PAX_PAGEEXEC
3065+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3066+{
3067+ unsigned long i;
3068+
3069+ printk(KERN_ERR "PAX: bytes at PC: ");
3070+ for (i = 0; i < 5; i++) {
3071+ unsigned int c;
3072+ if (get_user(c, (unsigned int *)pc+i))
3073+ printk(KERN_CONT "???????? ");
3074+ else
3075+ printk(KERN_CONT "%08x ", c);
3076+ }
3077+ printk("\n");
3078+}
3079+#endif
3080+
3081 /*
3082 * This routine handles page faults. It determines the address,
3083 * and the problem, and then passes it off to one of the appropriate
3084diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
3085index 302d779..7d35bf8 100644
3086--- a/arch/mips/mm/mmap.c
3087+++ b/arch/mips/mm/mmap.c
3088@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3089 do_color_align = 1;
3090
3091 /* requesting a specific address */
3092+
3093+#ifdef CONFIG_PAX_RANDMMAP
3094+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
3095+#endif
3096+
3097 if (addr) {
3098 if (do_color_align)
3099 addr = COLOUR_ALIGN(addr, pgoff);
3100@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3101 addr = PAGE_ALIGN(addr);
3102
3103 vma = find_vma(mm, addr);
3104- if (TASK_SIZE - len >= addr &&
3105- (!vma || addr + len <= vma->vm_start))
3106+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
3107 return addr;
3108 }
3109
3110@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3111 /* At this point: (!vma || addr < vma->vm_end). */
3112 if (TASK_SIZE - len < addr)
3113 return -ENOMEM;
3114- if (!vma || addr + len <= vma->vm_start)
3115+ if (check_heap_stack_gap(vmm, addr, len))
3116 return addr;
3117 addr = vma->vm_end;
3118 if (do_color_align)
3119@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3120 /* make sure it can fit in the remaining address space */
3121 if (likely(addr > len)) {
3122 vma = find_vma(mm, addr - len);
3123- if (!vma || addr <= vma->vm_start) {
3124+ if (check_heap_stack_gap(vmm, addr - len, len))
3125 /* cache the address as a hint for next time */
3126 return mm->free_area_cache = addr - len;
3127 }
3128@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3129 * return with success:
3130 */
3131 vma = find_vma(mm, addr);
3132- if (likely(!vma || addr + len <= vma->vm_start)) {
3133+ if (check_heap_stack_gap(vmm, addr, len)) {
3134 /* cache the address as a hint for next time */
3135 return mm->free_area_cache = addr;
3136 }
3137@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3138 mm->unmap_area = arch_unmap_area_topdown;
3139 }
3140 }
3141-
3142-static inline unsigned long brk_rnd(void)
3143-{
3144- unsigned long rnd = get_random_int();
3145-
3146- rnd = rnd << PAGE_SHIFT;
3147- /* 8MB for 32bit, 256MB for 64bit */
3148- if (TASK_IS_32BIT_ADDR)
3149- rnd = rnd & 0x7ffffful;
3150- else
3151- rnd = rnd & 0xffffffful;
3152-
3153- return rnd;
3154-}
3155-
3156-unsigned long arch_randomize_brk(struct mm_struct *mm)
3157-{
3158- unsigned long base = mm->brk;
3159- unsigned long ret;
3160-
3161- ret = PAGE_ALIGN(base + brk_rnd());
3162-
3163- if (ret < mm->brk)
3164- return mm->brk;
3165-
3166- return ret;
3167-}
3168diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3169index 967d144..db12197 100644
3170--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
3171+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3172@@ -11,12 +11,14 @@
3173 #ifndef _ASM_PROC_CACHE_H
3174 #define _ASM_PROC_CACHE_H
3175
3176+#include <linux/const.h>
3177+
3178 /* L1 cache */
3179
3180 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3181 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
3182-#define L1_CACHE_BYTES 16 /* bytes per entry */
3183 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
3184+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3185 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
3186
3187 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3188diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3189index bcb5df2..84fabd2 100644
3190--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3191+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3192@@ -16,13 +16,15 @@
3193 #ifndef _ASM_PROC_CACHE_H
3194 #define _ASM_PROC_CACHE_H
3195
3196+#include <linux/const.h>
3197+
3198 /*
3199 * L1 cache
3200 */
3201 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3202 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
3203-#define L1_CACHE_BYTES 32 /* bytes per entry */
3204 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
3205+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3206 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
3207
3208 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3209diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
3210index 4ce7a01..449202a 100644
3211--- a/arch/openrisc/include/asm/cache.h
3212+++ b/arch/openrisc/include/asm/cache.h
3213@@ -19,11 +19,13 @@
3214 #ifndef __ASM_OPENRISC_CACHE_H
3215 #define __ASM_OPENRISC_CACHE_H
3216
3217+#include <linux/const.h>
3218+
3219 /* FIXME: How can we replace these with values from the CPU...
3220 * they shouldn't be hard-coded!
3221 */
3222
3223-#define L1_CACHE_BYTES 16
3224 #define L1_CACHE_SHIFT 4
3225+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3226
3227 #endif /* __ASM_OPENRISC_CACHE_H */
3228diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
3229index 6c6defc..d30653d 100644
3230--- a/arch/parisc/include/asm/atomic.h
3231+++ b/arch/parisc/include/asm/atomic.h
3232@@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3233
3234 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3235
3236+#define atomic64_read_unchecked(v) atomic64_read(v)
3237+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3238+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3239+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3240+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3241+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3242+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3243+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3244+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3245+
3246 #endif /* !CONFIG_64BIT */
3247
3248
3249diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
3250index 47f11c7..3420df2 100644
3251--- a/arch/parisc/include/asm/cache.h
3252+++ b/arch/parisc/include/asm/cache.h
3253@@ -5,6 +5,7 @@
3254 #ifndef __ARCH_PARISC_CACHE_H
3255 #define __ARCH_PARISC_CACHE_H
3256
3257+#include <linux/const.h>
3258
3259 /*
3260 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
3261@@ -15,13 +16,13 @@
3262 * just ruin performance.
3263 */
3264 #ifdef CONFIG_PA20
3265-#define L1_CACHE_BYTES 64
3266 #define L1_CACHE_SHIFT 6
3267 #else
3268-#define L1_CACHE_BYTES 32
3269 #define L1_CACHE_SHIFT 5
3270 #endif
3271
3272+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3273+
3274 #ifndef __ASSEMBLY__
3275
3276 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3277diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
3278index 19f6cb1..6c78cf2 100644
3279--- a/arch/parisc/include/asm/elf.h
3280+++ b/arch/parisc/include/asm/elf.h
3281@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
3282
3283 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
3284
3285+#ifdef CONFIG_PAX_ASLR
3286+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3287+
3288+#define PAX_DELTA_MMAP_LEN 16
3289+#define PAX_DELTA_STACK_LEN 16
3290+#endif
3291+
3292 /* This yields a mask that user programs can use to figure out what
3293 instruction set this CPU supports. This could be done in user space,
3294 but it's not easy, and we've already done it here. */
3295diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
3296index fc987a1..6e068ef 100644
3297--- a/arch/parisc/include/asm/pgalloc.h
3298+++ b/arch/parisc/include/asm/pgalloc.h
3299@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3300 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
3301 }
3302
3303+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3304+{
3305+ pgd_populate(mm, pgd, pmd);
3306+}
3307+
3308 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
3309 {
3310 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
3311@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
3312 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
3313 #define pmd_free(mm, x) do { } while (0)
3314 #define pgd_populate(mm, pmd, pte) BUG()
3315+#define pgd_populate_kernel(mm, pmd, pte) BUG()
3316
3317 #endif
3318
3319diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
3320index ee99f23..802b0a1 100644
3321--- a/arch/parisc/include/asm/pgtable.h
3322+++ b/arch/parisc/include/asm/pgtable.h
3323@@ -212,6 +212,17 @@ struct vm_area_struct;
3324 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3325 #define PAGE_COPY PAGE_EXECREAD
3326 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3327+
3328+#ifdef CONFIG_PAX_PAGEEXEC
3329+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3330+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3331+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3332+#else
3333+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3334+# define PAGE_COPY_NOEXEC PAGE_COPY
3335+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3336+#endif
3337+
3338 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3339 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
3340 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
3341diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3342index 5e34ccf..672bc9c 100644
3343--- a/arch/parisc/kernel/module.c
3344+++ b/arch/parisc/kernel/module.c
3345@@ -98,16 +98,38 @@
3346
3347 /* three functions to determine where in the module core
3348 * or init pieces the location is */
3349+static inline int in_init_rx(struct module *me, void *loc)
3350+{
3351+ return (loc >= me->module_init_rx &&
3352+ loc < (me->module_init_rx + me->init_size_rx));
3353+}
3354+
3355+static inline int in_init_rw(struct module *me, void *loc)
3356+{
3357+ return (loc >= me->module_init_rw &&
3358+ loc < (me->module_init_rw + me->init_size_rw));
3359+}
3360+
3361 static inline int in_init(struct module *me, void *loc)
3362 {
3363- return (loc >= me->module_init &&
3364- loc <= (me->module_init + me->init_size));
3365+ return in_init_rx(me, loc) || in_init_rw(me, loc);
3366+}
3367+
3368+static inline int in_core_rx(struct module *me, void *loc)
3369+{
3370+ return (loc >= me->module_core_rx &&
3371+ loc < (me->module_core_rx + me->core_size_rx));
3372+}
3373+
3374+static inline int in_core_rw(struct module *me, void *loc)
3375+{
3376+ return (loc >= me->module_core_rw &&
3377+ loc < (me->module_core_rw + me->core_size_rw));
3378 }
3379
3380 static inline int in_core(struct module *me, void *loc)
3381 {
3382- return (loc >= me->module_core &&
3383- loc <= (me->module_core + me->core_size));
3384+ return in_core_rx(me, loc) || in_core_rw(me, loc);
3385 }
3386
3387 static inline int in_local(struct module *me, void *loc)
3388@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3389 }
3390
3391 /* align things a bit */
3392- me->core_size = ALIGN(me->core_size, 16);
3393- me->arch.got_offset = me->core_size;
3394- me->core_size += gots * sizeof(struct got_entry);
3395+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3396+ me->arch.got_offset = me->core_size_rw;
3397+ me->core_size_rw += gots * sizeof(struct got_entry);
3398
3399- me->core_size = ALIGN(me->core_size, 16);
3400- me->arch.fdesc_offset = me->core_size;
3401- me->core_size += fdescs * sizeof(Elf_Fdesc);
3402+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3403+ me->arch.fdesc_offset = me->core_size_rw;
3404+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3405
3406 me->arch.got_max = gots;
3407 me->arch.fdesc_max = fdescs;
3408@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3409
3410 BUG_ON(value == 0);
3411
3412- got = me->module_core + me->arch.got_offset;
3413+ got = me->module_core_rw + me->arch.got_offset;
3414 for (i = 0; got[i].addr; i++)
3415 if (got[i].addr == value)
3416 goto out;
3417@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3418 #ifdef CONFIG_64BIT
3419 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3420 {
3421- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3422+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3423
3424 if (!value) {
3425 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3426@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3427
3428 /* Create new one */
3429 fdesc->addr = value;
3430- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3431+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3432 return (Elf_Addr)fdesc;
3433 }
3434 #endif /* CONFIG_64BIT */
3435@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
3436
3437 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3438 end = table + sechdrs[me->arch.unwind_section].sh_size;
3439- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3440+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3441
3442 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3443 me->arch.unwind_section, table, end, gp);
3444diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3445index c9b9322..02d8940 100644
3446--- a/arch/parisc/kernel/sys_parisc.c
3447+++ b/arch/parisc/kernel/sys_parisc.c
3448@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3449 /* At this point: (!vma || addr < vma->vm_end). */
3450 if (TASK_SIZE - len < addr)
3451 return -ENOMEM;
3452- if (!vma || addr + len <= vma->vm_start)
3453+ if (check_heap_stack_gap(vma, addr, len))
3454 return addr;
3455 addr = vma->vm_end;
3456 }
3457@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3458 /* At this point: (!vma || addr < vma->vm_end). */
3459 if (TASK_SIZE - len < addr)
3460 return -ENOMEM;
3461- if (!vma || addr + len <= vma->vm_start)
3462+ if (check_heap_stack_gap(vma, addr, len))
3463 return addr;
3464 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3465 if (addr < vma->vm_end) /* handle wraparound */
3466@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3467 if (flags & MAP_FIXED)
3468 return addr;
3469 if (!addr)
3470- addr = TASK_UNMAPPED_BASE;
3471+ addr = current->mm->mmap_base;
3472
3473 if (filp) {
3474 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3475diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3476index 45ba99f..8e22c33 100644
3477--- a/arch/parisc/kernel/traps.c
3478+++ b/arch/parisc/kernel/traps.c
3479@@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3480
3481 down_read(&current->mm->mmap_sem);
3482 vma = find_vma(current->mm,regs->iaoq[0]);
3483- if (vma && (regs->iaoq[0] >= vma->vm_start)
3484- && (vma->vm_flags & VM_EXEC)) {
3485-
3486+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3487 fault_address = regs->iaoq[0];
3488 fault_space = regs->iasq[0];
3489
3490diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3491index 18162ce..94de376 100644
3492--- a/arch/parisc/mm/fault.c
3493+++ b/arch/parisc/mm/fault.c
3494@@ -15,6 +15,7 @@
3495 #include <linux/sched.h>
3496 #include <linux/interrupt.h>
3497 #include <linux/module.h>
3498+#include <linux/unistd.h>
3499
3500 #include <asm/uaccess.h>
3501 #include <asm/traps.h>
3502@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3503 static unsigned long
3504 parisc_acctyp(unsigned long code, unsigned int inst)
3505 {
3506- if (code == 6 || code == 16)
3507+ if (code == 6 || code == 7 || code == 16)
3508 return VM_EXEC;
3509
3510 switch (inst & 0xf0000000) {
3511@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3512 }
3513 #endif
3514
3515+#ifdef CONFIG_PAX_PAGEEXEC
3516+/*
3517+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3518+ *
3519+ * returns 1 when task should be killed
3520+ * 2 when rt_sigreturn trampoline was detected
3521+ * 3 when unpatched PLT trampoline was detected
3522+ */
3523+static int pax_handle_fetch_fault(struct pt_regs *regs)
3524+{
3525+
3526+#ifdef CONFIG_PAX_EMUPLT
3527+ int err;
3528+
3529+ do { /* PaX: unpatched PLT emulation */
3530+ unsigned int bl, depwi;
3531+
3532+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3533+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3534+
3535+ if (err)
3536+ break;
3537+
3538+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3539+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3540+
3541+ err = get_user(ldw, (unsigned int *)addr);
3542+ err |= get_user(bv, (unsigned int *)(addr+4));
3543+ err |= get_user(ldw2, (unsigned int *)(addr+8));
3544+
3545+ if (err)
3546+ break;
3547+
3548+ if (ldw == 0x0E801096U &&
3549+ bv == 0xEAC0C000U &&
3550+ ldw2 == 0x0E881095U)
3551+ {
3552+ unsigned int resolver, map;
3553+
3554+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3555+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3556+ if (err)
3557+ break;
3558+
3559+ regs->gr[20] = instruction_pointer(regs)+8;
3560+ regs->gr[21] = map;
3561+ regs->gr[22] = resolver;
3562+ regs->iaoq[0] = resolver | 3UL;
3563+ regs->iaoq[1] = regs->iaoq[0] + 4;
3564+ return 3;
3565+ }
3566+ }
3567+ } while (0);
3568+#endif
3569+
3570+#ifdef CONFIG_PAX_EMUTRAMP
3571+
3572+#ifndef CONFIG_PAX_EMUSIGRT
3573+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3574+ return 1;
3575+#endif
3576+
3577+ do { /* PaX: rt_sigreturn emulation */
3578+ unsigned int ldi1, ldi2, bel, nop;
3579+
3580+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3581+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3582+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3583+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3584+
3585+ if (err)
3586+ break;
3587+
3588+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3589+ ldi2 == 0x3414015AU &&
3590+ bel == 0xE4008200U &&
3591+ nop == 0x08000240U)
3592+ {
3593+ regs->gr[25] = (ldi1 & 2) >> 1;
3594+ regs->gr[20] = __NR_rt_sigreturn;
3595+ regs->gr[31] = regs->iaoq[1] + 16;
3596+ regs->sr[0] = regs->iasq[1];
3597+ regs->iaoq[0] = 0x100UL;
3598+ regs->iaoq[1] = regs->iaoq[0] + 4;
3599+ regs->iasq[0] = regs->sr[2];
3600+ regs->iasq[1] = regs->sr[2];
3601+ return 2;
3602+ }
3603+ } while (0);
3604+#endif
3605+
3606+ return 1;
3607+}
3608+
3609+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3610+{
3611+ unsigned long i;
3612+
3613+ printk(KERN_ERR "PAX: bytes at PC: ");
3614+ for (i = 0; i < 5; i++) {
3615+ unsigned int c;
3616+ if (get_user(c, (unsigned int *)pc+i))
3617+ printk(KERN_CONT "???????? ");
3618+ else
3619+ printk(KERN_CONT "%08x ", c);
3620+ }
3621+ printk("\n");
3622+}
3623+#endif
3624+
3625 int fixup_exception(struct pt_regs *regs)
3626 {
3627 const struct exception_table_entry *fix;
3628@@ -192,8 +303,33 @@ good_area:
3629
3630 acc_type = parisc_acctyp(code,regs->iir);
3631
3632- if ((vma->vm_flags & acc_type) != acc_type)
3633+ if ((vma->vm_flags & acc_type) != acc_type) {
3634+
3635+#ifdef CONFIG_PAX_PAGEEXEC
3636+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3637+ (address & ~3UL) == instruction_pointer(regs))
3638+ {
3639+ up_read(&mm->mmap_sem);
3640+ switch (pax_handle_fetch_fault(regs)) {
3641+
3642+#ifdef CONFIG_PAX_EMUPLT
3643+ case 3:
3644+ return;
3645+#endif
3646+
3647+#ifdef CONFIG_PAX_EMUTRAMP
3648+ case 2:
3649+ return;
3650+#endif
3651+
3652+ }
3653+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3654+ do_group_exit(SIGKILL);
3655+ }
3656+#endif
3657+
3658 goto bad_area;
3659+ }
3660
3661 /*
3662 * If for any reason at all we couldn't handle the fault, make
3663diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
3664index da29032..f76c24c 100644
3665--- a/arch/powerpc/include/asm/atomic.h
3666+++ b/arch/powerpc/include/asm/atomic.h
3667@@ -522,6 +522,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
3668 return t1;
3669 }
3670
3671+#define atomic64_read_unchecked(v) atomic64_read(v)
3672+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3673+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3674+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3675+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3676+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3677+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3678+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3679+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3680+
3681 #endif /* __powerpc64__ */
3682
3683 #endif /* __KERNEL__ */
3684diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3685index 9e495c9..b6878e5 100644
3686--- a/arch/powerpc/include/asm/cache.h
3687+++ b/arch/powerpc/include/asm/cache.h
3688@@ -3,6 +3,7 @@
3689
3690 #ifdef __KERNEL__
3691
3692+#include <linux/const.h>
3693
3694 /* bytes per L1 cache line */
3695 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3696@@ -22,7 +23,7 @@
3697 #define L1_CACHE_SHIFT 7
3698 #endif
3699
3700-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3701+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3702
3703 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3704
3705diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3706index 3bf9cca..e7457d0 100644
3707--- a/arch/powerpc/include/asm/elf.h
3708+++ b/arch/powerpc/include/asm/elf.h
3709@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3710 the loader. We need to make sure that it is out of the way of the program
3711 that it will "exec", and that there is sufficient room for the brk. */
3712
3713-extern unsigned long randomize_et_dyn(unsigned long base);
3714-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3715+#define ELF_ET_DYN_BASE (0x20000000)
3716+
3717+#ifdef CONFIG_PAX_ASLR
3718+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3719+
3720+#ifdef __powerpc64__
3721+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3722+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
3723+#else
3724+#define PAX_DELTA_MMAP_LEN 15
3725+#define PAX_DELTA_STACK_LEN 15
3726+#endif
3727+#endif
3728
3729 /*
3730 * Our registers are always unsigned longs, whether we're a 32 bit
3731@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3732 (0x7ff >> (PAGE_SHIFT - 12)) : \
3733 (0x3ffff >> (PAGE_SHIFT - 12)))
3734
3735-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3736-#define arch_randomize_brk arch_randomize_brk
3737-
3738 #endif /* __KERNEL__ */
3739
3740 /*
3741diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
3742index 8196e9c..d83a9f3 100644
3743--- a/arch/powerpc/include/asm/exec.h
3744+++ b/arch/powerpc/include/asm/exec.h
3745@@ -4,6 +4,6 @@
3746 #ifndef _ASM_POWERPC_EXEC_H
3747 #define _ASM_POWERPC_EXEC_H
3748
3749-extern unsigned long arch_align_stack(unsigned long sp);
3750+#define arch_align_stack(x) ((x) & ~0xfUL)
3751
3752 #endif /* _ASM_POWERPC_EXEC_H */
3753diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3754index bca8fdc..61e9580 100644
3755--- a/arch/powerpc/include/asm/kmap_types.h
3756+++ b/arch/powerpc/include/asm/kmap_types.h
3757@@ -27,6 +27,7 @@ enum km_type {
3758 KM_PPC_SYNC_PAGE,
3759 KM_PPC_SYNC_ICACHE,
3760 KM_KDB,
3761+ KM_CLEARPAGE,
3762 KM_TYPE_NR
3763 };
3764
3765diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3766index d4a7f64..451de1c 100644
3767--- a/arch/powerpc/include/asm/mman.h
3768+++ b/arch/powerpc/include/asm/mman.h
3769@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
3770 }
3771 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3772
3773-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3774+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3775 {
3776 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3777 }
3778diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3779index f072e97..b436dee 100644
3780--- a/arch/powerpc/include/asm/page.h
3781+++ b/arch/powerpc/include/asm/page.h
3782@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
3783 * and needs to be executable. This means the whole heap ends
3784 * up being executable.
3785 */
3786-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3787- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3788+#define VM_DATA_DEFAULT_FLAGS32 \
3789+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3790+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3791
3792 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3793 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3794@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
3795 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3796 #endif
3797
3798+#define ktla_ktva(addr) (addr)
3799+#define ktva_ktla(addr) (addr)
3800+
3801 /*
3802 * Use the top bit of the higher-level page table entries to indicate whether
3803 * the entries we point to contain hugepages. This works because we know that
3804diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3805index fed85e6..da5c71b 100644
3806--- a/arch/powerpc/include/asm/page_64.h
3807+++ b/arch/powerpc/include/asm/page_64.h
3808@@ -146,15 +146,18 @@ do { \
3809 * stack by default, so in the absence of a PT_GNU_STACK program header
3810 * we turn execute permission off.
3811 */
3812-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3813- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3814+#define VM_STACK_DEFAULT_FLAGS32 \
3815+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3816+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3817
3818 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3819 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3820
3821+#ifndef CONFIG_PAX_PAGEEXEC
3822 #define VM_STACK_DEFAULT_FLAGS \
3823 (is_32bit_task() ? \
3824 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3825+#endif
3826
3827 #include <asm-generic/getorder.h>
3828
3829diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
3830index 292725c..f87ae14 100644
3831--- a/arch/powerpc/include/asm/pgalloc-64.h
3832+++ b/arch/powerpc/include/asm/pgalloc-64.h
3833@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
3834 #ifndef CONFIG_PPC_64K_PAGES
3835
3836 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
3837+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
3838
3839 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
3840 {
3841@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3842 pud_set(pud, (unsigned long)pmd);
3843 }
3844
3845+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3846+{
3847+ pud_populate(mm, pud, pmd);
3848+}
3849+
3850 #define pmd_populate(mm, pmd, pte_page) \
3851 pmd_populate_kernel(mm, pmd, page_address(pte_page))
3852 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
3853@@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3854 #else /* CONFIG_PPC_64K_PAGES */
3855
3856 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
3857+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
3858
3859 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
3860 pte_t *pte)
3861diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3862index 2e0e411..7899c68 100644
3863--- a/arch/powerpc/include/asm/pgtable.h
3864+++ b/arch/powerpc/include/asm/pgtable.h
3865@@ -2,6 +2,7 @@
3866 #define _ASM_POWERPC_PGTABLE_H
3867 #ifdef __KERNEL__
3868
3869+#include <linux/const.h>
3870 #ifndef __ASSEMBLY__
3871 #include <asm/processor.h> /* For TASK_SIZE */
3872 #include <asm/mmu.h>
3873diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3874index 4aad413..85d86bf 100644
3875--- a/arch/powerpc/include/asm/pte-hash32.h
3876+++ b/arch/powerpc/include/asm/pte-hash32.h
3877@@ -21,6 +21,7 @@
3878 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3879 #define _PAGE_USER 0x004 /* usermode access allowed */
3880 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3881+#define _PAGE_EXEC _PAGE_GUARDED
3882 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3883 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3884 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3885diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3886index 9d7f0fb..a28fe69 100644
3887--- a/arch/powerpc/include/asm/reg.h
3888+++ b/arch/powerpc/include/asm/reg.h
3889@@ -212,6 +212,7 @@
3890 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3891 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3892 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3893+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3894 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3895 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3896 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3897diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
3898index 4a741c7..c8162227b 100644
3899--- a/arch/powerpc/include/asm/thread_info.h
3900+++ b/arch/powerpc/include/asm/thread_info.h
3901@@ -104,12 +104,14 @@ static inline struct thread_info *current_thread_info(void)
3902 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
3903 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
3904 #define TIF_SINGLESTEP 8 /* singlestepping active */
3905-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
3906 #define TIF_SECCOMP 10 /* secure computing */
3907 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
3908 #define TIF_NOERROR 12 /* Force successful syscall return */
3909 #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
3910 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
3911+#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
3912+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
3913+#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
3914
3915 /* as above, but as bit values */
3916 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
3917@@ -127,8 +129,11 @@ static inline struct thread_info *current_thread_info(void)
3918 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
3919 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
3920 #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
3921+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
3922+
3923 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
3924- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
3925+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
3926+ _TIF_GRSEC_SETXID)
3927
3928 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
3929 _TIF_NOTIFY_RESUME)
3930diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3931index bd0fb84..a42a14b 100644
3932--- a/arch/powerpc/include/asm/uaccess.h
3933+++ b/arch/powerpc/include/asm/uaccess.h
3934@@ -13,6 +13,8 @@
3935 #define VERIFY_READ 0
3936 #define VERIFY_WRITE 1
3937
3938+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3939+
3940 /*
3941 * The fs value determines whether argument validity checking should be
3942 * performed or not. If get_fs() == USER_DS, checking is performed, with
3943@@ -327,52 +329,6 @@ do { \
3944 extern unsigned long __copy_tofrom_user(void __user *to,
3945 const void __user *from, unsigned long size);
3946
3947-#ifndef __powerpc64__
3948-
3949-static inline unsigned long copy_from_user(void *to,
3950- const void __user *from, unsigned long n)
3951-{
3952- unsigned long over;
3953-
3954- if (access_ok(VERIFY_READ, from, n))
3955- return __copy_tofrom_user((__force void __user *)to, from, n);
3956- if ((unsigned long)from < TASK_SIZE) {
3957- over = (unsigned long)from + n - TASK_SIZE;
3958- return __copy_tofrom_user((__force void __user *)to, from,
3959- n - over) + over;
3960- }
3961- return n;
3962-}
3963-
3964-static inline unsigned long copy_to_user(void __user *to,
3965- const void *from, unsigned long n)
3966-{
3967- unsigned long over;
3968-
3969- if (access_ok(VERIFY_WRITE, to, n))
3970- return __copy_tofrom_user(to, (__force void __user *)from, n);
3971- if ((unsigned long)to < TASK_SIZE) {
3972- over = (unsigned long)to + n - TASK_SIZE;
3973- return __copy_tofrom_user(to, (__force void __user *)from,
3974- n - over) + over;
3975- }
3976- return n;
3977-}
3978-
3979-#else /* __powerpc64__ */
3980-
3981-#define __copy_in_user(to, from, size) \
3982- __copy_tofrom_user((to), (from), (size))
3983-
3984-extern unsigned long copy_from_user(void *to, const void __user *from,
3985- unsigned long n);
3986-extern unsigned long copy_to_user(void __user *to, const void *from,
3987- unsigned long n);
3988-extern unsigned long copy_in_user(void __user *to, const void __user *from,
3989- unsigned long n);
3990-
3991-#endif /* __powerpc64__ */
3992-
3993 static inline unsigned long __copy_from_user_inatomic(void *to,
3994 const void __user *from, unsigned long n)
3995 {
3996@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3997 if (ret == 0)
3998 return 0;
3999 }
4000+
4001+ if (!__builtin_constant_p(n))
4002+ check_object_size(to, n, false);
4003+
4004 return __copy_tofrom_user((__force void __user *)to, from, n);
4005 }
4006
4007@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
4008 if (ret == 0)
4009 return 0;
4010 }
4011+
4012+ if (!__builtin_constant_p(n))
4013+ check_object_size(from, n, true);
4014+
4015 return __copy_tofrom_user(to, (__force const void __user *)from, n);
4016 }
4017
4018@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
4019 return __copy_to_user_inatomic(to, from, size);
4020 }
4021
4022+#ifndef __powerpc64__
4023+
4024+static inline unsigned long __must_check copy_from_user(void *to,
4025+ const void __user *from, unsigned long n)
4026+{
4027+ unsigned long over;
4028+
4029+ if ((long)n < 0)
4030+ return n;
4031+
4032+ if (access_ok(VERIFY_READ, from, n)) {
4033+ if (!__builtin_constant_p(n))
4034+ check_object_size(to, n, false);
4035+ return __copy_tofrom_user((__force void __user *)to, from, n);
4036+ }
4037+ if ((unsigned long)from < TASK_SIZE) {
4038+ over = (unsigned long)from + n - TASK_SIZE;
4039+ if (!__builtin_constant_p(n - over))
4040+ check_object_size(to, n - over, false);
4041+ return __copy_tofrom_user((__force void __user *)to, from,
4042+ n - over) + over;
4043+ }
4044+ return n;
4045+}
4046+
4047+static inline unsigned long __must_check copy_to_user(void __user *to,
4048+ const void *from, unsigned long n)
4049+{
4050+ unsigned long over;
4051+
4052+ if ((long)n < 0)
4053+ return n;
4054+
4055+ if (access_ok(VERIFY_WRITE, to, n)) {
4056+ if (!__builtin_constant_p(n))
4057+ check_object_size(from, n, true);
4058+ return __copy_tofrom_user(to, (__force void __user *)from, n);
4059+ }
4060+ if ((unsigned long)to < TASK_SIZE) {
4061+ over = (unsigned long)to + n - TASK_SIZE;
4062+ if (!__builtin_constant_p(n - over))
4063+ check_object_size(from, n - over, true);
4064+ return __copy_tofrom_user(to, (__force void __user *)from,
4065+ n - over) + over;
4066+ }
4067+ return n;
4068+}
4069+
4070+#else /* __powerpc64__ */
4071+
4072+#define __copy_in_user(to, from, size) \
4073+ __copy_tofrom_user((to), (from), (size))
4074+
4075+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
4076+{
4077+ if ((long)n < 0 || n > INT_MAX)
4078+ return n;
4079+
4080+ if (!__builtin_constant_p(n))
4081+ check_object_size(to, n, false);
4082+
4083+ if (likely(access_ok(VERIFY_READ, from, n)))
4084+ n = __copy_from_user(to, from, n);
4085+ else
4086+ memset(to, 0, n);
4087+ return n;
4088+}
4089+
4090+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
4091+{
4092+ if ((long)n < 0 || n > INT_MAX)
4093+ return n;
4094+
4095+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
4096+ if (!__builtin_constant_p(n))
4097+ check_object_size(from, n, true);
4098+ n = __copy_to_user(to, from, n);
4099+ }
4100+ return n;
4101+}
4102+
4103+extern unsigned long copy_in_user(void __user *to, const void __user *from,
4104+ unsigned long n);
4105+
4106+#endif /* __powerpc64__ */
4107+
4108 extern unsigned long __clear_user(void __user *addr, unsigned long size);
4109
4110 static inline unsigned long clear_user(void __user *addr, unsigned long size)
4111diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
4112index 7215cc2..a9730c1 100644
4113--- a/arch/powerpc/kernel/exceptions-64e.S
4114+++ b/arch/powerpc/kernel/exceptions-64e.S
4115@@ -661,6 +661,7 @@ storage_fault_common:
4116 std r14,_DAR(r1)
4117 std r15,_DSISR(r1)
4118 addi r3,r1,STACK_FRAME_OVERHEAD
4119+ bl .save_nvgprs
4120 mr r4,r14
4121 mr r5,r15
4122 ld r14,PACA_EXGEN+EX_R14(r13)
4123@@ -669,8 +670,7 @@ storage_fault_common:
4124 cmpdi r3,0
4125 bne- 1f
4126 b .ret_from_except_lite
4127-1: bl .save_nvgprs
4128- mr r5,r3
4129+1: mr r5,r3
4130 addi r3,r1,STACK_FRAME_OVERHEAD
4131 ld r4,_DAR(r1)
4132 bl .bad_page_fault
4133diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
4134index 8f880bc..c5bd2f3 100644
4135--- a/arch/powerpc/kernel/exceptions-64s.S
4136+++ b/arch/powerpc/kernel/exceptions-64s.S
4137@@ -890,10 +890,10 @@ handle_page_fault:
4138 11: ld r4,_DAR(r1)
4139 ld r5,_DSISR(r1)
4140 addi r3,r1,STACK_FRAME_OVERHEAD
4141+ bl .save_nvgprs
4142 bl .do_page_fault
4143 cmpdi r3,0
4144 beq+ 12f
4145- bl .save_nvgprs
4146 mr r5,r3
4147 addi r3,r1,STACK_FRAME_OVERHEAD
4148 lwz r4,_DAR(r1)
4149diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
4150index 2e3200c..72095ce 100644
4151--- a/arch/powerpc/kernel/module_32.c
4152+++ b/arch/powerpc/kernel/module_32.c
4153@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
4154 me->arch.core_plt_section = i;
4155 }
4156 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
4157- printk("Module doesn't contain .plt or .init.plt sections.\n");
4158+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
4159 return -ENOEXEC;
4160 }
4161
4162@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
4163
4164 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
4165 /* Init, or core PLT? */
4166- if (location >= mod->module_core
4167- && location < mod->module_core + mod->core_size)
4168+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
4169+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
4170 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
4171- else
4172+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
4173+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
4174 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
4175+ else {
4176+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
4177+ return ~0UL;
4178+ }
4179
4180 /* Find this entry, or if that fails, the next avail. entry */
4181 while (entry->jump[0]) {
4182diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
4183index 4937c96..70714b7 100644
4184--- a/arch/powerpc/kernel/process.c
4185+++ b/arch/powerpc/kernel/process.c
4186@@ -681,8 +681,8 @@ void show_regs(struct pt_regs * regs)
4187 * Lookup NIP late so we have the best change of getting the
4188 * above info out without failing
4189 */
4190- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
4191- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
4192+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
4193+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
4194 #endif
4195 show_stack(current, (unsigned long *) regs->gpr[1]);
4196 if (!user_mode(regs))
4197@@ -1186,10 +1186,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4198 newsp = stack[0];
4199 ip = stack[STACK_FRAME_LR_SAVE];
4200 if (!firstframe || ip != lr) {
4201- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
4202+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
4203 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4204 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
4205- printk(" (%pS)",
4206+ printk(" (%pA)",
4207 (void *)current->ret_stack[curr_frame].ret);
4208 curr_frame--;
4209 }
4210@@ -1209,7 +1209,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4211 struct pt_regs *regs = (struct pt_regs *)
4212 (sp + STACK_FRAME_OVERHEAD);
4213 lr = regs->link;
4214- printk("--- Exception: %lx at %pS\n LR = %pS\n",
4215+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
4216 regs->trap, (void *)regs->nip, (void *)lr);
4217 firstframe = 1;
4218 }
4219@@ -1282,58 +1282,3 @@ void thread_info_cache_init(void)
4220 }
4221
4222 #endif /* THREAD_SHIFT < PAGE_SHIFT */
4223-
4224-unsigned long arch_align_stack(unsigned long sp)
4225-{
4226- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4227- sp -= get_random_int() & ~PAGE_MASK;
4228- return sp & ~0xf;
4229-}
4230-
4231-static inline unsigned long brk_rnd(void)
4232-{
4233- unsigned long rnd = 0;
4234-
4235- /* 8MB for 32bit, 1GB for 64bit */
4236- if (is_32bit_task())
4237- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
4238- else
4239- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
4240-
4241- return rnd << PAGE_SHIFT;
4242-}
4243-
4244-unsigned long arch_randomize_brk(struct mm_struct *mm)
4245-{
4246- unsigned long base = mm->brk;
4247- unsigned long ret;
4248-
4249-#ifdef CONFIG_PPC_STD_MMU_64
4250- /*
4251- * If we are using 1TB segments and we are allowed to randomise
4252- * the heap, we can put it above 1TB so it is backed by a 1TB
4253- * segment. Otherwise the heap will be in the bottom 1TB
4254- * which always uses 256MB segments and this may result in a
4255- * performance penalty.
4256- */
4257- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
4258- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
4259-#endif
4260-
4261- ret = PAGE_ALIGN(base + brk_rnd());
4262-
4263- if (ret < mm->brk)
4264- return mm->brk;
4265-
4266- return ret;
4267-}
4268-
4269-unsigned long randomize_et_dyn(unsigned long base)
4270-{
4271- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4272-
4273- if (ret < base)
4274- return base;
4275-
4276- return ret;
4277-}
4278diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
4279index 8d8e028..c2aeb50 100644
4280--- a/arch/powerpc/kernel/ptrace.c
4281+++ b/arch/powerpc/kernel/ptrace.c
4282@@ -1702,6 +1702,10 @@ long arch_ptrace(struct task_struct *child, long request,
4283 return ret;
4284 }
4285
4286+#ifdef CONFIG_GRKERNSEC_SETXID
4287+extern void gr_delayed_cred_worker(void);
4288+#endif
4289+
4290 /*
4291 * We must return the syscall number to actually look up in the table.
4292 * This can be -1L to skip running any syscall at all.
4293@@ -1712,6 +1716,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
4294
4295 secure_computing(regs->gpr[0]);
4296
4297+#ifdef CONFIG_GRKERNSEC_SETXID
4298+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4299+ gr_delayed_cred_worker();
4300+#endif
4301+
4302 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
4303 tracehook_report_syscall_entry(regs))
4304 /*
4305@@ -1746,6 +1755,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
4306 {
4307 int step;
4308
4309+#ifdef CONFIG_GRKERNSEC_SETXID
4310+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4311+ gr_delayed_cred_worker();
4312+#endif
4313+
4314 audit_syscall_exit(regs);
4315
4316 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
4317diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
4318index 45eb998..0cb36bc 100644
4319--- a/arch/powerpc/kernel/signal_32.c
4320+++ b/arch/powerpc/kernel/signal_32.c
4321@@ -861,7 +861,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
4322 /* Save user registers on the stack */
4323 frame = &rt_sf->uc.uc_mcontext;
4324 addr = frame;
4325- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
4326+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4327 if (save_user_regs(regs, frame, 0, 1))
4328 goto badframe;
4329 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
4330diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
4331index 2692efd..6673d2e 100644
4332--- a/arch/powerpc/kernel/signal_64.c
4333+++ b/arch/powerpc/kernel/signal_64.c
4334@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
4335 current->thread.fpscr.val = 0;
4336
4337 /* Set up to return from userspace. */
4338- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
4339+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4340 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
4341 } else {
4342 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
4343diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
4344index 1589723..cefe690 100644
4345--- a/arch/powerpc/kernel/traps.c
4346+++ b/arch/powerpc/kernel/traps.c
4347@@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
4348 return flags;
4349 }
4350
4351+extern void gr_handle_kernel_exploit(void);
4352+
4353 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4354 int signr)
4355 {
4356@@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4357 panic("Fatal exception in interrupt");
4358 if (panic_on_oops)
4359 panic("Fatal exception");
4360+
4361+ gr_handle_kernel_exploit();
4362+
4363 do_exit(signr);
4364 }
4365
4366diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
4367index 9eb5b9b..e45498a 100644
4368--- a/arch/powerpc/kernel/vdso.c
4369+++ b/arch/powerpc/kernel/vdso.c
4370@@ -34,6 +34,7 @@
4371 #include <asm/firmware.h>
4372 #include <asm/vdso.h>
4373 #include <asm/vdso_datapage.h>
4374+#include <asm/mman.h>
4375
4376 #include "setup.h"
4377
4378@@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4379 vdso_base = VDSO32_MBASE;
4380 #endif
4381
4382- current->mm->context.vdso_base = 0;
4383+ current->mm->context.vdso_base = ~0UL;
4384
4385 /* vDSO has a problem and was disabled, just don't "enable" it for the
4386 * process
4387@@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4388 vdso_base = get_unmapped_area(NULL, vdso_base,
4389 (vdso_pages << PAGE_SHIFT) +
4390 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
4391- 0, 0);
4392+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
4393 if (IS_ERR_VALUE(vdso_base)) {
4394 rc = vdso_base;
4395 goto fail_mmapsem;
4396diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4397index 5eea6f3..5d10396 100644
4398--- a/arch/powerpc/lib/usercopy_64.c
4399+++ b/arch/powerpc/lib/usercopy_64.c
4400@@ -9,22 +9,6 @@
4401 #include <linux/module.h>
4402 #include <asm/uaccess.h>
4403
4404-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4405-{
4406- if (likely(access_ok(VERIFY_READ, from, n)))
4407- n = __copy_from_user(to, from, n);
4408- else
4409- memset(to, 0, n);
4410- return n;
4411-}
4412-
4413-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4414-{
4415- if (likely(access_ok(VERIFY_WRITE, to, n)))
4416- n = __copy_to_user(to, from, n);
4417- return n;
4418-}
4419-
4420 unsigned long copy_in_user(void __user *to, const void __user *from,
4421 unsigned long n)
4422 {
4423@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4424 return n;
4425 }
4426
4427-EXPORT_SYMBOL(copy_from_user);
4428-EXPORT_SYMBOL(copy_to_user);
4429 EXPORT_SYMBOL(copy_in_user);
4430
4431diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4432index 08ffcf5..a0ab912 100644
4433--- a/arch/powerpc/mm/fault.c
4434+++ b/arch/powerpc/mm/fault.c
4435@@ -32,6 +32,10 @@
4436 #include <linux/perf_event.h>
4437 #include <linux/magic.h>
4438 #include <linux/ratelimit.h>
4439+#include <linux/slab.h>
4440+#include <linux/pagemap.h>
4441+#include <linux/compiler.h>
4442+#include <linux/unistd.h>
4443
4444 #include <asm/firmware.h>
4445 #include <asm/page.h>
4446@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4447 }
4448 #endif
4449
4450+#ifdef CONFIG_PAX_PAGEEXEC
4451+/*
4452+ * PaX: decide what to do with offenders (regs->nip = fault address)
4453+ *
4454+ * returns 1 when task should be killed
4455+ */
4456+static int pax_handle_fetch_fault(struct pt_regs *regs)
4457+{
4458+ return 1;
4459+}
4460+
4461+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4462+{
4463+ unsigned long i;
4464+
4465+ printk(KERN_ERR "PAX: bytes at PC: ");
4466+ for (i = 0; i < 5; i++) {
4467+ unsigned int c;
4468+ if (get_user(c, (unsigned int __user *)pc+i))
4469+ printk(KERN_CONT "???????? ");
4470+ else
4471+ printk(KERN_CONT "%08x ", c);
4472+ }
4473+ printk("\n");
4474+}
4475+#endif
4476+
4477 /*
4478 * Check whether the instruction at regs->nip is a store using
4479 * an update addressing form which will update r1.
4480@@ -215,7 +246,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4481 * indicate errors in DSISR but can validly be set in SRR1.
4482 */
4483 if (trap == 0x400)
4484- error_code &= 0x48200000;
4485+ error_code &= 0x58200000;
4486 else
4487 is_write = error_code & DSISR_ISSTORE;
4488 #else
4489@@ -366,7 +397,7 @@ good_area:
4490 * "undefined". Of those that can be set, this is the only
4491 * one which seems bad.
4492 */
4493- if (error_code & 0x10000000)
4494+ if (error_code & DSISR_GUARDED)
4495 /* Guarded storage error. */
4496 goto bad_area;
4497 #endif /* CONFIG_8xx */
4498@@ -381,7 +412,7 @@ good_area:
4499 * processors use the same I/D cache coherency mechanism
4500 * as embedded.
4501 */
4502- if (error_code & DSISR_PROTFAULT)
4503+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4504 goto bad_area;
4505 #endif /* CONFIG_PPC_STD_MMU */
4506
4507@@ -463,6 +494,23 @@ bad_area:
4508 bad_area_nosemaphore:
4509 /* User mode accesses cause a SIGSEGV */
4510 if (user_mode(regs)) {
4511+
4512+#ifdef CONFIG_PAX_PAGEEXEC
4513+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4514+#ifdef CONFIG_PPC_STD_MMU
4515+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4516+#else
4517+ if (is_exec && regs->nip == address) {
4518+#endif
4519+ switch (pax_handle_fetch_fault(regs)) {
4520+ }
4521+
4522+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4523+ do_group_exit(SIGKILL);
4524+ }
4525+ }
4526+#endif
4527+
4528 _exception(SIGSEGV, regs, code, address);
4529 return 0;
4530 }
4531diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4532index 67a42ed..1c7210c 100644
4533--- a/arch/powerpc/mm/mmap_64.c
4534+++ b/arch/powerpc/mm/mmap_64.c
4535@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4536 */
4537 if (mmap_is_legacy()) {
4538 mm->mmap_base = TASK_UNMAPPED_BASE;
4539+
4540+#ifdef CONFIG_PAX_RANDMMAP
4541+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4542+ mm->mmap_base += mm->delta_mmap;
4543+#endif
4544+
4545 mm->get_unmapped_area = arch_get_unmapped_area;
4546 mm->unmap_area = arch_unmap_area;
4547 } else {
4548 mm->mmap_base = mmap_base();
4549+
4550+#ifdef CONFIG_PAX_RANDMMAP
4551+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4552+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4553+#endif
4554+
4555 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4556 mm->unmap_area = arch_unmap_area_topdown;
4557 }
4558diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4559index 73709f7..6b90313 100644
4560--- a/arch/powerpc/mm/slice.c
4561+++ b/arch/powerpc/mm/slice.c
4562@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4563 if ((mm->task_size - len) < addr)
4564 return 0;
4565 vma = find_vma(mm, addr);
4566- return (!vma || (addr + len) <= vma->vm_start);
4567+ return check_heap_stack_gap(vma, addr, len);
4568 }
4569
4570 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4571@@ -256,7 +256,7 @@ full_search:
4572 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4573 continue;
4574 }
4575- if (!vma || addr + len <= vma->vm_start) {
4576+ if (check_heap_stack_gap(vma, addr, len)) {
4577 /*
4578 * Remember the place where we stopped the search:
4579 */
4580@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4581 }
4582 }
4583
4584- addr = mm->mmap_base;
4585- while (addr > len) {
4586+ if (mm->mmap_base < len)
4587+ addr = -ENOMEM;
4588+ else
4589+ addr = mm->mmap_base - len;
4590+
4591+ while (!IS_ERR_VALUE(addr)) {
4592 /* Go down by chunk size */
4593- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4594+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
4595
4596 /* Check for hit with different page size */
4597 mask = slice_range_to_mask(addr, len);
4598@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4599 * return with success:
4600 */
4601 vma = find_vma(mm, addr);
4602- if (!vma || (addr + len) <= vma->vm_start) {
4603+ if (check_heap_stack_gap(vma, addr, len)) {
4604 /* remember the address as a hint for next time */
4605 if (use_cache)
4606 mm->free_area_cache = addr;
4607@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4608 mm->cached_hole_size = vma->vm_start - addr;
4609
4610 /* try just below the current vma->vm_start */
4611- addr = vma->vm_start;
4612+ addr = skip_heap_stack_gap(vma, len);
4613 }
4614
4615 /*
4616@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4617 if (fixed && addr > (mm->task_size - len))
4618 return -EINVAL;
4619
4620+#ifdef CONFIG_PAX_RANDMMAP
4621+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4622+ addr = 0;
4623+#endif
4624+
4625 /* If hint, make sure it matches our alignment restrictions */
4626 if (!fixed && addr) {
4627 addr = _ALIGN_UP(addr, 1ul << pshift);
4628diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4629index 748347b..81bc6c7 100644
4630--- a/arch/s390/include/asm/atomic.h
4631+++ b/arch/s390/include/asm/atomic.h
4632@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4633 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4634 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4635
4636+#define atomic64_read_unchecked(v) atomic64_read(v)
4637+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4638+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4639+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4640+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4641+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4642+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4643+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4644+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4645+
4646 #define smp_mb__before_atomic_dec() smp_mb()
4647 #define smp_mb__after_atomic_dec() smp_mb()
4648 #define smp_mb__before_atomic_inc() smp_mb()
4649diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4650index 2a30d5a..5e5586f 100644
4651--- a/arch/s390/include/asm/cache.h
4652+++ b/arch/s390/include/asm/cache.h
4653@@ -11,8 +11,10 @@
4654 #ifndef __ARCH_S390_CACHE_H
4655 #define __ARCH_S390_CACHE_H
4656
4657-#define L1_CACHE_BYTES 256
4658+#include <linux/const.h>
4659+
4660 #define L1_CACHE_SHIFT 8
4661+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4662 #define NET_SKB_PAD 32
4663
4664 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4665diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4666index c4ee39f..352881b 100644
4667--- a/arch/s390/include/asm/elf.h
4668+++ b/arch/s390/include/asm/elf.h
4669@@ -161,8 +161,14 @@ extern unsigned int vdso_enabled;
4670 the loader. We need to make sure that it is out of the way of the program
4671 that it will "exec", and that there is sufficient room for the brk. */
4672
4673-extern unsigned long randomize_et_dyn(unsigned long base);
4674-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4675+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4676+
4677+#ifdef CONFIG_PAX_ASLR
4678+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4679+
4680+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4681+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4682+#endif
4683
4684 /* This yields a mask that user programs can use to figure out what
4685 instruction set this CPU supports. */
4686@@ -210,7 +216,4 @@ struct linux_binprm;
4687 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4688 int arch_setup_additional_pages(struct linux_binprm *, int);
4689
4690-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4691-#define arch_randomize_brk arch_randomize_brk
4692-
4693 #endif
4694diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
4695index c4a93d6..4d2a9b4 100644
4696--- a/arch/s390/include/asm/exec.h
4697+++ b/arch/s390/include/asm/exec.h
4698@@ -7,6 +7,6 @@
4699 #ifndef __ASM_EXEC_H
4700 #define __ASM_EXEC_H
4701
4702-extern unsigned long arch_align_stack(unsigned long sp);
4703+#define arch_align_stack(x) ((x) & ~0xfUL)
4704
4705 #endif /* __ASM_EXEC_H */
4706diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4707index 8f2cada..1cddd55 100644
4708--- a/arch/s390/include/asm/uaccess.h
4709+++ b/arch/s390/include/asm/uaccess.h
4710@@ -236,6 +236,10 @@ static inline unsigned long __must_check
4711 copy_to_user(void __user *to, const void *from, unsigned long n)
4712 {
4713 might_fault();
4714+
4715+ if ((long)n < 0)
4716+ return n;
4717+
4718 if (access_ok(VERIFY_WRITE, to, n))
4719 n = __copy_to_user(to, from, n);
4720 return n;
4721@@ -261,6 +265,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4722 static inline unsigned long __must_check
4723 __copy_from_user(void *to, const void __user *from, unsigned long n)
4724 {
4725+ if ((long)n < 0)
4726+ return n;
4727+
4728 if (__builtin_constant_p(n) && (n <= 256))
4729 return uaccess.copy_from_user_small(n, from, to);
4730 else
4731@@ -295,6 +302,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
4732 unsigned int sz = __compiletime_object_size(to);
4733
4734 might_fault();
4735+
4736+ if ((long)n < 0)
4737+ return n;
4738+
4739 if (unlikely(sz != -1 && sz < n)) {
4740 copy_from_user_overflow();
4741 return n;
4742diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4743index dfcb343..eda788a 100644
4744--- a/arch/s390/kernel/module.c
4745+++ b/arch/s390/kernel/module.c
4746@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4747
4748 /* Increase core size by size of got & plt and set start
4749 offsets for got and plt. */
4750- me->core_size = ALIGN(me->core_size, 4);
4751- me->arch.got_offset = me->core_size;
4752- me->core_size += me->arch.got_size;
4753- me->arch.plt_offset = me->core_size;
4754- me->core_size += me->arch.plt_size;
4755+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
4756+ me->arch.got_offset = me->core_size_rw;
4757+ me->core_size_rw += me->arch.got_size;
4758+ me->arch.plt_offset = me->core_size_rx;
4759+ me->core_size_rx += me->arch.plt_size;
4760 return 0;
4761 }
4762
4763@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4764 if (info->got_initialized == 0) {
4765 Elf_Addr *gotent;
4766
4767- gotent = me->module_core + me->arch.got_offset +
4768+ gotent = me->module_core_rw + me->arch.got_offset +
4769 info->got_offset;
4770 *gotent = val;
4771 info->got_initialized = 1;
4772@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4773 else if (r_type == R_390_GOTENT ||
4774 r_type == R_390_GOTPLTENT)
4775 *(unsigned int *) loc =
4776- (val + (Elf_Addr) me->module_core - loc) >> 1;
4777+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4778 else if (r_type == R_390_GOT64 ||
4779 r_type == R_390_GOTPLT64)
4780 *(unsigned long *) loc = val;
4781@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4782 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4783 if (info->plt_initialized == 0) {
4784 unsigned int *ip;
4785- ip = me->module_core + me->arch.plt_offset +
4786+ ip = me->module_core_rx + me->arch.plt_offset +
4787 info->plt_offset;
4788 #ifndef CONFIG_64BIT
4789 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4790@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4791 val - loc + 0xffffUL < 0x1ffffeUL) ||
4792 (r_type == R_390_PLT32DBL &&
4793 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4794- val = (Elf_Addr) me->module_core +
4795+ val = (Elf_Addr) me->module_core_rx +
4796 me->arch.plt_offset +
4797 info->plt_offset;
4798 val += rela->r_addend - loc;
4799@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4800 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4801 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4802 val = val + rela->r_addend -
4803- ((Elf_Addr) me->module_core + me->arch.got_offset);
4804+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4805 if (r_type == R_390_GOTOFF16)
4806 *(unsigned short *) loc = val;
4807 else if (r_type == R_390_GOTOFF32)
4808@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4809 break;
4810 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4811 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4812- val = (Elf_Addr) me->module_core + me->arch.got_offset +
4813+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4814 rela->r_addend - loc;
4815 if (r_type == R_390_GOTPC)
4816 *(unsigned int *) loc = val;
4817diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
4818index 60055ce..ee4b252 100644
4819--- a/arch/s390/kernel/process.c
4820+++ b/arch/s390/kernel/process.c
4821@@ -316,39 +316,3 @@ unsigned long get_wchan(struct task_struct *p)
4822 }
4823 return 0;
4824 }
4825-
4826-unsigned long arch_align_stack(unsigned long sp)
4827-{
4828- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4829- sp -= get_random_int() & ~PAGE_MASK;
4830- return sp & ~0xf;
4831-}
4832-
4833-static inline unsigned long brk_rnd(void)
4834-{
4835- /* 8MB for 32bit, 1GB for 64bit */
4836- if (is_32bit_task())
4837- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4838- else
4839- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4840-}
4841-
4842-unsigned long arch_randomize_brk(struct mm_struct *mm)
4843-{
4844- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4845-
4846- if (ret < mm->brk)
4847- return mm->brk;
4848- return ret;
4849-}
4850-
4851-unsigned long randomize_et_dyn(unsigned long base)
4852-{
4853- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4854-
4855- if (!(current->flags & PF_RANDOMIZE))
4856- return base;
4857- if (ret < base)
4858- return base;
4859- return ret;
4860-}
4861diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4862index 2857c48..d047481 100644
4863--- a/arch/s390/mm/mmap.c
4864+++ b/arch/s390/mm/mmap.c
4865@@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4866 */
4867 if (mmap_is_legacy()) {
4868 mm->mmap_base = TASK_UNMAPPED_BASE;
4869+
4870+#ifdef CONFIG_PAX_RANDMMAP
4871+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4872+ mm->mmap_base += mm->delta_mmap;
4873+#endif
4874+
4875 mm->get_unmapped_area = arch_get_unmapped_area;
4876 mm->unmap_area = arch_unmap_area;
4877 } else {
4878 mm->mmap_base = mmap_base();
4879+
4880+#ifdef CONFIG_PAX_RANDMMAP
4881+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4882+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4883+#endif
4884+
4885 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4886 mm->unmap_area = arch_unmap_area_topdown;
4887 }
4888@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4889 */
4890 if (mmap_is_legacy()) {
4891 mm->mmap_base = TASK_UNMAPPED_BASE;
4892+
4893+#ifdef CONFIG_PAX_RANDMMAP
4894+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4895+ mm->mmap_base += mm->delta_mmap;
4896+#endif
4897+
4898 mm->get_unmapped_area = s390_get_unmapped_area;
4899 mm->unmap_area = arch_unmap_area;
4900 } else {
4901 mm->mmap_base = mmap_base();
4902+
4903+#ifdef CONFIG_PAX_RANDMMAP
4904+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4905+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4906+#endif
4907+
4908 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4909 mm->unmap_area = arch_unmap_area_topdown;
4910 }
4911diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4912index ae3d59f..f65f075 100644
4913--- a/arch/score/include/asm/cache.h
4914+++ b/arch/score/include/asm/cache.h
4915@@ -1,7 +1,9 @@
4916 #ifndef _ASM_SCORE_CACHE_H
4917 #define _ASM_SCORE_CACHE_H
4918
4919+#include <linux/const.h>
4920+
4921 #define L1_CACHE_SHIFT 4
4922-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4923+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4924
4925 #endif /* _ASM_SCORE_CACHE_H */
4926diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
4927index f9f3cd5..58ff438 100644
4928--- a/arch/score/include/asm/exec.h
4929+++ b/arch/score/include/asm/exec.h
4930@@ -1,6 +1,6 @@
4931 #ifndef _ASM_SCORE_EXEC_H
4932 #define _ASM_SCORE_EXEC_H
4933
4934-extern unsigned long arch_align_stack(unsigned long sp);
4935+#define arch_align_stack(x) (x)
4936
4937 #endif /* _ASM_SCORE_EXEC_H */
4938diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4939index 2707023..1c2a3b7 100644
4940--- a/arch/score/kernel/process.c
4941+++ b/arch/score/kernel/process.c
4942@@ -159,8 +159,3 @@ unsigned long get_wchan(struct task_struct *task)
4943
4944 return task_pt_regs(task)->cp0_epc;
4945 }
4946-
4947-unsigned long arch_align_stack(unsigned long sp)
4948-{
4949- return sp;
4950-}
4951diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
4952index ef9e555..331bd29 100644
4953--- a/arch/sh/include/asm/cache.h
4954+++ b/arch/sh/include/asm/cache.h
4955@@ -9,10 +9,11 @@
4956 #define __ASM_SH_CACHE_H
4957 #ifdef __KERNEL__
4958
4959+#include <linux/const.h>
4960 #include <linux/init.h>
4961 #include <cpu/cache.h>
4962
4963-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4964+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4965
4966 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4967
4968diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4969index afeb710..d1d1289 100644
4970--- a/arch/sh/mm/mmap.c
4971+++ b/arch/sh/mm/mmap.c
4972@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4973 addr = PAGE_ALIGN(addr);
4974
4975 vma = find_vma(mm, addr);
4976- if (TASK_SIZE - len >= addr &&
4977- (!vma || addr + len <= vma->vm_start))
4978+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4979 return addr;
4980 }
4981
4982@@ -106,7 +105,7 @@ full_search:
4983 }
4984 return -ENOMEM;
4985 }
4986- if (likely(!vma || addr + len <= vma->vm_start)) {
4987+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4988 /*
4989 * Remember the place where we stopped the search:
4990 */
4991@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4992 addr = PAGE_ALIGN(addr);
4993
4994 vma = find_vma(mm, addr);
4995- if (TASK_SIZE - len >= addr &&
4996- (!vma || addr + len <= vma->vm_start))
4997+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4998 return addr;
4999 }
5000
5001@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5002 /* make sure it can fit in the remaining address space */
5003 if (likely(addr > len)) {
5004 vma = find_vma(mm, addr-len);
5005- if (!vma || addr <= vma->vm_start) {
5006+ if (check_heap_stack_gap(vma, addr - len, len)) {
5007 /* remember the address as a hint for next time */
5008 return (mm->free_area_cache = addr-len);
5009 }
5010@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5011 if (unlikely(mm->mmap_base < len))
5012 goto bottomup;
5013
5014- addr = mm->mmap_base-len;
5015- if (do_colour_align)
5016- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5017+ addr = mm->mmap_base - len;
5018
5019 do {
5020+ if (do_colour_align)
5021+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5022 /*
5023 * Lookup failure means no vma is above this address,
5024 * else if new region fits below vma->vm_start,
5025 * return with success:
5026 */
5027 vma = find_vma(mm, addr);
5028- if (likely(!vma || addr+len <= vma->vm_start)) {
5029+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5030 /* remember the address as a hint for next time */
5031 return (mm->free_area_cache = addr);
5032 }
5033@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5034 mm->cached_hole_size = vma->vm_start - addr;
5035
5036 /* try just below the current vma->vm_start */
5037- addr = vma->vm_start-len;
5038- if (do_colour_align)
5039- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5040- } while (likely(len < vma->vm_start));
5041+ addr = skip_heap_stack_gap(vma, len);
5042+ } while (!IS_ERR_VALUE(addr));
5043
5044 bottomup:
5045 /*
5046diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
5047index eddcfb3..b117d90 100644
5048--- a/arch/sparc/Makefile
5049+++ b/arch/sparc/Makefile
5050@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
5051 # Export what is needed by arch/sparc/boot/Makefile
5052 export VMLINUX_INIT VMLINUX_MAIN
5053 VMLINUX_INIT := $(head-y) $(init-y)
5054-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5055+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5056 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5057 VMLINUX_MAIN += $(drivers-y) $(net-y)
5058
5059diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
5060index ce35a1c..2e7b8f9 100644
5061--- a/arch/sparc/include/asm/atomic_64.h
5062+++ b/arch/sparc/include/asm/atomic_64.h
5063@@ -14,18 +14,40 @@
5064 #define ATOMIC64_INIT(i) { (i) }
5065
5066 #define atomic_read(v) (*(volatile int *)&(v)->counter)
5067+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5068+{
5069+ return v->counter;
5070+}
5071 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
5072+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5073+{
5074+ return v->counter;
5075+}
5076
5077 #define atomic_set(v, i) (((v)->counter) = i)
5078+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5079+{
5080+ v->counter = i;
5081+}
5082 #define atomic64_set(v, i) (((v)->counter) = i)
5083+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5084+{
5085+ v->counter = i;
5086+}
5087
5088 extern void atomic_add(int, atomic_t *);
5089+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
5090 extern void atomic64_add(long, atomic64_t *);
5091+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
5092 extern void atomic_sub(int, atomic_t *);
5093+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
5094 extern void atomic64_sub(long, atomic64_t *);
5095+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
5096
5097 extern int atomic_add_ret(int, atomic_t *);
5098+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
5099 extern long atomic64_add_ret(long, atomic64_t *);
5100+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
5101 extern int atomic_sub_ret(int, atomic_t *);
5102 extern long atomic64_sub_ret(long, atomic64_t *);
5103
5104@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5105 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
5106
5107 #define atomic_inc_return(v) atomic_add_ret(1, v)
5108+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5109+{
5110+ return atomic_add_ret_unchecked(1, v);
5111+}
5112 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
5113+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5114+{
5115+ return atomic64_add_ret_unchecked(1, v);
5116+}
5117
5118 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
5119 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
5120
5121 #define atomic_add_return(i, v) atomic_add_ret(i, v)
5122+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5123+{
5124+ return atomic_add_ret_unchecked(i, v);
5125+}
5126 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
5127+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
5128+{
5129+ return atomic64_add_ret_unchecked(i, v);
5130+}
5131
5132 /*
5133 * atomic_inc_and_test - increment and test
5134@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5135 * other cases.
5136 */
5137 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5138+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5139+{
5140+ return atomic_inc_return_unchecked(v) == 0;
5141+}
5142 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
5143
5144 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
5145@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5146 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
5147
5148 #define atomic_inc(v) atomic_add(1, v)
5149+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
5150+{
5151+ atomic_add_unchecked(1, v);
5152+}
5153 #define atomic64_inc(v) atomic64_add(1, v)
5154+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
5155+{
5156+ atomic64_add_unchecked(1, v);
5157+}
5158
5159 #define atomic_dec(v) atomic_sub(1, v)
5160+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
5161+{
5162+ atomic_sub_unchecked(1, v);
5163+}
5164 #define atomic64_dec(v) atomic64_sub(1, v)
5165+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
5166+{
5167+ atomic64_sub_unchecked(1, v);
5168+}
5169
5170 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
5171 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
5172
5173 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5174+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
5175+{
5176+ return cmpxchg(&v->counter, old, new);
5177+}
5178 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
5179+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5180+{
5181+ return xchg(&v->counter, new);
5182+}
5183
5184 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5185 {
5186- int c, old;
5187+ int c, old, new;
5188 c = atomic_read(v);
5189 for (;;) {
5190- if (unlikely(c == (u)))
5191+ if (unlikely(c == u))
5192 break;
5193- old = atomic_cmpxchg((v), c, c + (a));
5194+
5195+ asm volatile("addcc %2, %0, %0\n"
5196+
5197+#ifdef CONFIG_PAX_REFCOUNT
5198+ "tvs %%icc, 6\n"
5199+#endif
5200+
5201+ : "=r" (new)
5202+ : "0" (c), "ir" (a)
5203+ : "cc");
5204+
5205+ old = atomic_cmpxchg(v, c, new);
5206 if (likely(old == c))
5207 break;
5208 c = old;
5209@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5210 #define atomic64_cmpxchg(v, o, n) \
5211 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
5212 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
5213+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
5214+{
5215+ return xchg(&v->counter, new);
5216+}
5217
5218 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5219 {
5220- long c, old;
5221+ long c, old, new;
5222 c = atomic64_read(v);
5223 for (;;) {
5224- if (unlikely(c == (u)))
5225+ if (unlikely(c == u))
5226 break;
5227- old = atomic64_cmpxchg((v), c, c + (a));
5228+
5229+ asm volatile("addcc %2, %0, %0\n"
5230+
5231+#ifdef CONFIG_PAX_REFCOUNT
5232+ "tvs %%xcc, 6\n"
5233+#endif
5234+
5235+ : "=r" (new)
5236+ : "0" (c), "ir" (a)
5237+ : "cc");
5238+
5239+ old = atomic64_cmpxchg(v, c, new);
5240 if (likely(old == c))
5241 break;
5242 c = old;
5243 }
5244- return c != (u);
5245+ return c != u;
5246 }
5247
5248 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5249diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
5250index 69358b5..9d0d492 100644
5251--- a/arch/sparc/include/asm/cache.h
5252+++ b/arch/sparc/include/asm/cache.h
5253@@ -7,10 +7,12 @@
5254 #ifndef _SPARC_CACHE_H
5255 #define _SPARC_CACHE_H
5256
5257+#include <linux/const.h>
5258+
5259 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
5260
5261 #define L1_CACHE_SHIFT 5
5262-#define L1_CACHE_BYTES 32
5263+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5264
5265 #ifdef CONFIG_SPARC32
5266 #define SMP_CACHE_BYTES_SHIFT 5
5267diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
5268index 4269ca6..e3da77f 100644
5269--- a/arch/sparc/include/asm/elf_32.h
5270+++ b/arch/sparc/include/asm/elf_32.h
5271@@ -114,6 +114,13 @@ typedef struct {
5272
5273 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
5274
5275+#ifdef CONFIG_PAX_ASLR
5276+#define PAX_ELF_ET_DYN_BASE 0x10000UL
5277+
5278+#define PAX_DELTA_MMAP_LEN 16
5279+#define PAX_DELTA_STACK_LEN 16
5280+#endif
5281+
5282 /* This yields a mask that user programs can use to figure out what
5283 instruction set this cpu supports. This can NOT be done in userspace
5284 on Sparc. */
5285diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
5286index 7df8b7f..4946269 100644
5287--- a/arch/sparc/include/asm/elf_64.h
5288+++ b/arch/sparc/include/asm/elf_64.h
5289@@ -180,6 +180,13 @@ typedef struct {
5290 #define ELF_ET_DYN_BASE 0x0000010000000000UL
5291 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
5292
5293+#ifdef CONFIG_PAX_ASLR
5294+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
5295+
5296+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
5297+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
5298+#endif
5299+
5300 extern unsigned long sparc64_elf_hwcap;
5301 #define ELF_HWCAP sparc64_elf_hwcap
5302
5303diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
5304index ca2b344..c6084f89 100644
5305--- a/arch/sparc/include/asm/pgalloc_32.h
5306+++ b/arch/sparc/include/asm/pgalloc_32.h
5307@@ -37,6 +37,7 @@ BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
5308 BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
5309 #define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
5310 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
5311+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
5312
5313 BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
5314 #define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
5315diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
5316index 40b2d7a..22a665b 100644
5317--- a/arch/sparc/include/asm/pgalloc_64.h
5318+++ b/arch/sparc/include/asm/pgalloc_64.h
5319@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
5320 }
5321
5322 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
5323+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
5324
5325 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5326 {
5327diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
5328index 3d71018..48a11c5 100644
5329--- a/arch/sparc/include/asm/pgtable_32.h
5330+++ b/arch/sparc/include/asm/pgtable_32.h
5331@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
5332 BTFIXUPDEF_INT(page_none)
5333 BTFIXUPDEF_INT(page_copy)
5334 BTFIXUPDEF_INT(page_readonly)
5335+
5336+#ifdef CONFIG_PAX_PAGEEXEC
5337+BTFIXUPDEF_INT(page_shared_noexec)
5338+BTFIXUPDEF_INT(page_copy_noexec)
5339+BTFIXUPDEF_INT(page_readonly_noexec)
5340+#endif
5341+
5342 BTFIXUPDEF_INT(page_kernel)
5343
5344 #define PMD_SHIFT SUN4C_PMD_SHIFT
5345@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
5346 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
5347 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
5348
5349+#ifdef CONFIG_PAX_PAGEEXEC
5350+extern pgprot_t PAGE_SHARED_NOEXEC;
5351+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
5352+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
5353+#else
5354+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5355+# define PAGE_COPY_NOEXEC PAGE_COPY
5356+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5357+#endif
5358+
5359 extern unsigned long page_kernel;
5360
5361 #ifdef MODULE
5362diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
5363index f6ae2b2..b03ffc7 100644
5364--- a/arch/sparc/include/asm/pgtsrmmu.h
5365+++ b/arch/sparc/include/asm/pgtsrmmu.h
5366@@ -115,6 +115,13 @@
5367 SRMMU_EXEC | SRMMU_REF)
5368 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
5369 SRMMU_EXEC | SRMMU_REF)
5370+
5371+#ifdef CONFIG_PAX_PAGEEXEC
5372+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
5373+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5374+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5375+#endif
5376+
5377 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
5378 SRMMU_DIRTY | SRMMU_REF)
5379
5380diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
5381index 9689176..63c18ea 100644
5382--- a/arch/sparc/include/asm/spinlock_64.h
5383+++ b/arch/sparc/include/asm/spinlock_64.h
5384@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
5385
5386 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
5387
5388-static void inline arch_read_lock(arch_rwlock_t *lock)
5389+static inline void arch_read_lock(arch_rwlock_t *lock)
5390 {
5391 unsigned long tmp1, tmp2;
5392
5393 __asm__ __volatile__ (
5394 "1: ldsw [%2], %0\n"
5395 " brlz,pn %0, 2f\n"
5396-"4: add %0, 1, %1\n"
5397+"4: addcc %0, 1, %1\n"
5398+
5399+#ifdef CONFIG_PAX_REFCOUNT
5400+" tvs %%icc, 6\n"
5401+#endif
5402+
5403 " cas [%2], %0, %1\n"
5404 " cmp %0, %1\n"
5405 " bne,pn %%icc, 1b\n"
5406@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
5407 " .previous"
5408 : "=&r" (tmp1), "=&r" (tmp2)
5409 : "r" (lock)
5410- : "memory");
5411+ : "memory", "cc");
5412 }
5413
5414-static int inline arch_read_trylock(arch_rwlock_t *lock)
5415+static inline int arch_read_trylock(arch_rwlock_t *lock)
5416 {
5417 int tmp1, tmp2;
5418
5419@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5420 "1: ldsw [%2], %0\n"
5421 " brlz,a,pn %0, 2f\n"
5422 " mov 0, %0\n"
5423-" add %0, 1, %1\n"
5424+" addcc %0, 1, %1\n"
5425+
5426+#ifdef CONFIG_PAX_REFCOUNT
5427+" tvs %%icc, 6\n"
5428+#endif
5429+
5430 " cas [%2], %0, %1\n"
5431 " cmp %0, %1\n"
5432 " bne,pn %%icc, 1b\n"
5433@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5434 return tmp1;
5435 }
5436
5437-static void inline arch_read_unlock(arch_rwlock_t *lock)
5438+static inline void arch_read_unlock(arch_rwlock_t *lock)
5439 {
5440 unsigned long tmp1, tmp2;
5441
5442 __asm__ __volatile__(
5443 "1: lduw [%2], %0\n"
5444-" sub %0, 1, %1\n"
5445+" subcc %0, 1, %1\n"
5446+
5447+#ifdef CONFIG_PAX_REFCOUNT
5448+" tvs %%icc, 6\n"
5449+#endif
5450+
5451 " cas [%2], %0, %1\n"
5452 " cmp %0, %1\n"
5453 " bne,pn %%xcc, 1b\n"
5454@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
5455 : "memory");
5456 }
5457
5458-static void inline arch_write_lock(arch_rwlock_t *lock)
5459+static inline void arch_write_lock(arch_rwlock_t *lock)
5460 {
5461 unsigned long mask, tmp1, tmp2;
5462
5463@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
5464 : "memory");
5465 }
5466
5467-static void inline arch_write_unlock(arch_rwlock_t *lock)
5468+static inline void arch_write_unlock(arch_rwlock_t *lock)
5469 {
5470 __asm__ __volatile__(
5471 " stw %%g0, [%0]"
5472@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
5473 : "memory");
5474 }
5475
5476-static int inline arch_write_trylock(arch_rwlock_t *lock)
5477+static inline int arch_write_trylock(arch_rwlock_t *lock)
5478 {
5479 unsigned long mask, tmp1, tmp2, result;
5480
5481diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5482index c2a1080..21ed218 100644
5483--- a/arch/sparc/include/asm/thread_info_32.h
5484+++ b/arch/sparc/include/asm/thread_info_32.h
5485@@ -50,6 +50,8 @@ struct thread_info {
5486 unsigned long w_saved;
5487
5488 struct restart_block restart_block;
5489+
5490+ unsigned long lowest_stack;
5491 };
5492
5493 /*
5494diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5495index 01d057f..13a7d2f 100644
5496--- a/arch/sparc/include/asm/thread_info_64.h
5497+++ b/arch/sparc/include/asm/thread_info_64.h
5498@@ -63,6 +63,8 @@ struct thread_info {
5499 struct pt_regs *kern_una_regs;
5500 unsigned int kern_una_insn;
5501
5502+ unsigned long lowest_stack;
5503+
5504 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5505 };
5506
5507@@ -214,10 +216,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
5508 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
5509 /* flag bit 6 is available */
5510 #define TIF_32BIT 7 /* 32-bit binary */
5511-/* flag bit 8 is available */
5512+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
5513 #define TIF_SECCOMP 9 /* secure computing */
5514 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
5515 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
5516+
5517 /* NOTE: Thread flags >= 12 should be ones we have no interest
5518 * in using in assembly, else we can't use the mask as
5519 * an immediate value in instructions such as andcc.
5520@@ -236,12 +239,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
5521 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
5522 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
5523 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
5524+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5525
5526 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
5527 _TIF_DO_NOTIFY_RESUME_MASK | \
5528 _TIF_NEED_RESCHED)
5529 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
5530
5531+#define _TIF_WORK_SYSCALL \
5532+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
5533+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
5534+
5535+
5536 /*
5537 * Thread-synchronous status.
5538 *
5539diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5540index e88fbe5..96b0ce5 100644
5541--- a/arch/sparc/include/asm/uaccess.h
5542+++ b/arch/sparc/include/asm/uaccess.h
5543@@ -1,5 +1,13 @@
5544 #ifndef ___ASM_SPARC_UACCESS_H
5545 #define ___ASM_SPARC_UACCESS_H
5546+
5547+#ifdef __KERNEL__
5548+#ifndef __ASSEMBLY__
5549+#include <linux/types.h>
5550+extern void check_object_size(const void *ptr, unsigned long n, bool to);
5551+#endif
5552+#endif
5553+
5554 #if defined(__sparc__) && defined(__arch64__)
5555 #include <asm/uaccess_64.h>
5556 #else
5557diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5558index 8303ac4..07f333d 100644
5559--- a/arch/sparc/include/asm/uaccess_32.h
5560+++ b/arch/sparc/include/asm/uaccess_32.h
5561@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5562
5563 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5564 {
5565- if (n && __access_ok((unsigned long) to, n))
5566+ if ((long)n < 0)
5567+ return n;
5568+
5569+ if (n && __access_ok((unsigned long) to, n)) {
5570+ if (!__builtin_constant_p(n))
5571+ check_object_size(from, n, true);
5572 return __copy_user(to, (__force void __user *) from, n);
5573- else
5574+ } else
5575 return n;
5576 }
5577
5578 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5579 {
5580+ if ((long)n < 0)
5581+ return n;
5582+
5583+ if (!__builtin_constant_p(n))
5584+ check_object_size(from, n, true);
5585+
5586 return __copy_user(to, (__force void __user *) from, n);
5587 }
5588
5589 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5590 {
5591- if (n && __access_ok((unsigned long) from, n))
5592+ if ((long)n < 0)
5593+ return n;
5594+
5595+ if (n && __access_ok((unsigned long) from, n)) {
5596+ if (!__builtin_constant_p(n))
5597+ check_object_size(to, n, false);
5598 return __copy_user((__force void __user *) to, from, n);
5599- else
5600+ } else
5601 return n;
5602 }
5603
5604 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5605 {
5606+ if ((long)n < 0)
5607+ return n;
5608+
5609 return __copy_user((__force void __user *) to, from, n);
5610 }
5611
5612diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5613index a1091afb..380228e 100644
5614--- a/arch/sparc/include/asm/uaccess_64.h
5615+++ b/arch/sparc/include/asm/uaccess_64.h
5616@@ -10,6 +10,7 @@
5617 #include <linux/compiler.h>
5618 #include <linux/string.h>
5619 #include <linux/thread_info.h>
5620+#include <linux/kernel.h>
5621 #include <asm/asi.h>
5622 #include <asm/spitfire.h>
5623 #include <asm-generic/uaccess-unaligned.h>
5624@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5625 static inline unsigned long __must_check
5626 copy_from_user(void *to, const void __user *from, unsigned long size)
5627 {
5628- unsigned long ret = ___copy_from_user(to, from, size);
5629+ unsigned long ret;
5630
5631+ if ((long)size < 0 || size > INT_MAX)
5632+ return size;
5633+
5634+ if (!__builtin_constant_p(size))
5635+ check_object_size(to, size, false);
5636+
5637+ ret = ___copy_from_user(to, from, size);
5638 if (unlikely(ret))
5639 ret = copy_from_user_fixup(to, from, size);
5640
5641@@ -229,8 +237,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5642 static inline unsigned long __must_check
5643 copy_to_user(void __user *to, const void *from, unsigned long size)
5644 {
5645- unsigned long ret = ___copy_to_user(to, from, size);
5646+ unsigned long ret;
5647
5648+ if ((long)size < 0 || size > INT_MAX)
5649+ return size;
5650+
5651+ if (!__builtin_constant_p(size))
5652+ check_object_size(from, size, true);
5653+
5654+ ret = ___copy_to_user(to, from, size);
5655 if (unlikely(ret))
5656 ret = copy_to_user_fixup(to, from, size);
5657 return ret;
5658diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5659index cb85458..e063f17 100644
5660--- a/arch/sparc/kernel/Makefile
5661+++ b/arch/sparc/kernel/Makefile
5662@@ -3,7 +3,7 @@
5663 #
5664
5665 asflags-y := -ansi
5666-ccflags-y := -Werror
5667+#ccflags-y := -Werror
5668
5669 extra-y := head_$(BITS).o
5670 extra-y += init_task.o
5671diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5672index efa0754..74b03fe 100644
5673--- a/arch/sparc/kernel/process_32.c
5674+++ b/arch/sparc/kernel/process_32.c
5675@@ -200,7 +200,7 @@ void __show_backtrace(unsigned long fp)
5676 rw->ins[4], rw->ins[5],
5677 rw->ins[6],
5678 rw->ins[7]);
5679- printk("%pS\n", (void *) rw->ins[7]);
5680+ printk("%pA\n", (void *) rw->ins[7]);
5681 rw = (struct reg_window32 *) rw->ins[6];
5682 }
5683 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5684@@ -267,14 +267,14 @@ void show_regs(struct pt_regs *r)
5685
5686 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5687 r->psr, r->pc, r->npc, r->y, print_tainted());
5688- printk("PC: <%pS>\n", (void *) r->pc);
5689+ printk("PC: <%pA>\n", (void *) r->pc);
5690 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5691 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5692 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5693 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5694 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5695 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5696- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5697+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5698
5699 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5700 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5701@@ -309,7 +309,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5702 rw = (struct reg_window32 *) fp;
5703 pc = rw->ins[7];
5704 printk("[%08lx : ", pc);
5705- printk("%pS ] ", (void *) pc);
5706+ printk("%pA ] ", (void *) pc);
5707 fp = rw->ins[6];
5708 } while (++count < 16);
5709 printk("\n");
5710diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5711index aff0c72..9067b39 100644
5712--- a/arch/sparc/kernel/process_64.c
5713+++ b/arch/sparc/kernel/process_64.c
5714@@ -179,14 +179,14 @@ static void show_regwindow(struct pt_regs *regs)
5715 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5716 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5717 if (regs->tstate & TSTATE_PRIV)
5718- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5719+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5720 }
5721
5722 void show_regs(struct pt_regs *regs)
5723 {
5724 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5725 regs->tpc, regs->tnpc, regs->y, print_tainted());
5726- printk("TPC: <%pS>\n", (void *) regs->tpc);
5727+ printk("TPC: <%pA>\n", (void *) regs->tpc);
5728 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5729 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5730 regs->u_regs[3]);
5731@@ -199,7 +199,7 @@ void show_regs(struct pt_regs *regs)
5732 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5733 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5734 regs->u_regs[15]);
5735- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5736+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5737 show_regwindow(regs);
5738 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5739 }
5740@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5741 ((tp && tp->task) ? tp->task->pid : -1));
5742
5743 if (gp->tstate & TSTATE_PRIV) {
5744- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5745+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5746 (void *) gp->tpc,
5747 (void *) gp->o7,
5748 (void *) gp->i7,
5749diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
5750index 6f97c07..b1300ec 100644
5751--- a/arch/sparc/kernel/ptrace_64.c
5752+++ b/arch/sparc/kernel/ptrace_64.c
5753@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
5754 return ret;
5755 }
5756
5757+#ifdef CONFIG_GRKERNSEC_SETXID
5758+extern void gr_delayed_cred_worker(void);
5759+#endif
5760+
5761 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5762 {
5763 int ret = 0;
5764@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5765 /* do the secure computing check first */
5766 secure_computing(regs->u_regs[UREG_G1]);
5767
5768+#ifdef CONFIG_GRKERNSEC_SETXID
5769+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5770+ gr_delayed_cred_worker();
5771+#endif
5772+
5773 if (test_thread_flag(TIF_SYSCALL_TRACE))
5774 ret = tracehook_report_syscall_entry(regs);
5775
5776@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5777
5778 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
5779 {
5780+#ifdef CONFIG_GRKERNSEC_SETXID
5781+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5782+ gr_delayed_cred_worker();
5783+#endif
5784+
5785 audit_syscall_exit(regs);
5786
5787 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
5788diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5789index 42b282f..28ce9f2 100644
5790--- a/arch/sparc/kernel/sys_sparc_32.c
5791+++ b/arch/sparc/kernel/sys_sparc_32.c
5792@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5793 if (ARCH_SUN4C && len > 0x20000000)
5794 return -ENOMEM;
5795 if (!addr)
5796- addr = TASK_UNMAPPED_BASE;
5797+ addr = current->mm->mmap_base;
5798
5799 if (flags & MAP_SHARED)
5800 addr = COLOUR_ALIGN(addr);
5801@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5802 }
5803 if (TASK_SIZE - PAGE_SIZE - len < addr)
5804 return -ENOMEM;
5805- if (!vmm || addr + len <= vmm->vm_start)
5806+ if (check_heap_stack_gap(vmm, addr, len))
5807 return addr;
5808 addr = vmm->vm_end;
5809 if (flags & MAP_SHARED)
5810diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5811index 3ee51f1..2ba4913 100644
5812--- a/arch/sparc/kernel/sys_sparc_64.c
5813+++ b/arch/sparc/kernel/sys_sparc_64.c
5814@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5815 /* We do not accept a shared mapping if it would violate
5816 * cache aliasing constraints.
5817 */
5818- if ((flags & MAP_SHARED) &&
5819+ if ((filp || (flags & MAP_SHARED)) &&
5820 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5821 return -EINVAL;
5822 return addr;
5823@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5824 if (filp || (flags & MAP_SHARED))
5825 do_color_align = 1;
5826
5827+#ifdef CONFIG_PAX_RANDMMAP
5828+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5829+#endif
5830+
5831 if (addr) {
5832 if (do_color_align)
5833 addr = COLOUR_ALIGN(addr, pgoff);
5834@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5835 addr = PAGE_ALIGN(addr);
5836
5837 vma = find_vma(mm, addr);
5838- if (task_size - len >= addr &&
5839- (!vma || addr + len <= vma->vm_start))
5840+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5841 return addr;
5842 }
5843
5844 if (len > mm->cached_hole_size) {
5845- start_addr = addr = mm->free_area_cache;
5846+ start_addr = addr = mm->free_area_cache;
5847 } else {
5848- start_addr = addr = TASK_UNMAPPED_BASE;
5849+ start_addr = addr = mm->mmap_base;
5850 mm->cached_hole_size = 0;
5851 }
5852
5853@@ -174,14 +177,14 @@ full_search:
5854 vma = find_vma(mm, VA_EXCLUDE_END);
5855 }
5856 if (unlikely(task_size < addr)) {
5857- if (start_addr != TASK_UNMAPPED_BASE) {
5858- start_addr = addr = TASK_UNMAPPED_BASE;
5859+ if (start_addr != mm->mmap_base) {
5860+ start_addr = addr = mm->mmap_base;
5861 mm->cached_hole_size = 0;
5862 goto full_search;
5863 }
5864 return -ENOMEM;
5865 }
5866- if (likely(!vma || addr + len <= vma->vm_start)) {
5867+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5868 /*
5869 * Remember the place where we stopped the search:
5870 */
5871@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5872 /* We do not accept a shared mapping if it would violate
5873 * cache aliasing constraints.
5874 */
5875- if ((flags & MAP_SHARED) &&
5876+ if ((filp || (flags & MAP_SHARED)) &&
5877 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5878 return -EINVAL;
5879 return addr;
5880@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5881 addr = PAGE_ALIGN(addr);
5882
5883 vma = find_vma(mm, addr);
5884- if (task_size - len >= addr &&
5885- (!vma || addr + len <= vma->vm_start))
5886+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5887 return addr;
5888 }
5889
5890@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5891 /* make sure it can fit in the remaining address space */
5892 if (likely(addr > len)) {
5893 vma = find_vma(mm, addr-len);
5894- if (!vma || addr <= vma->vm_start) {
5895+ if (check_heap_stack_gap(vma, addr - len, len)) {
5896 /* remember the address as a hint for next time */
5897 return (mm->free_area_cache = addr-len);
5898 }
5899@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5900 if (unlikely(mm->mmap_base < len))
5901 goto bottomup;
5902
5903- addr = mm->mmap_base-len;
5904- if (do_color_align)
5905- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5906+ addr = mm->mmap_base - len;
5907
5908 do {
5909+ if (do_color_align)
5910+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5911 /*
5912 * Lookup failure means no vma is above this address,
5913 * else if new region fits below vma->vm_start,
5914 * return with success:
5915 */
5916 vma = find_vma(mm, addr);
5917- if (likely(!vma || addr+len <= vma->vm_start)) {
5918+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5919 /* remember the address as a hint for next time */
5920 return (mm->free_area_cache = addr);
5921 }
5922@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5923 mm->cached_hole_size = vma->vm_start - addr;
5924
5925 /* try just below the current vma->vm_start */
5926- addr = vma->vm_start-len;
5927- if (do_color_align)
5928- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5929- } while (likely(len < vma->vm_start));
5930+ addr = skip_heap_stack_gap(vma, len);
5931+ } while (!IS_ERR_VALUE(addr));
5932
5933 bottomup:
5934 /*
5935@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5936 gap == RLIM_INFINITY ||
5937 sysctl_legacy_va_layout) {
5938 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5939+
5940+#ifdef CONFIG_PAX_RANDMMAP
5941+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5942+ mm->mmap_base += mm->delta_mmap;
5943+#endif
5944+
5945 mm->get_unmapped_area = arch_get_unmapped_area;
5946 mm->unmap_area = arch_unmap_area;
5947 } else {
5948@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5949 gap = (task_size / 6 * 5);
5950
5951 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5952+
5953+#ifdef CONFIG_PAX_RANDMMAP
5954+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5955+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5956+#endif
5957+
5958 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5959 mm->unmap_area = arch_unmap_area_topdown;
5960 }
5961diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
5962index 1d7e274..b39c527 100644
5963--- a/arch/sparc/kernel/syscalls.S
5964+++ b/arch/sparc/kernel/syscalls.S
5965@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
5966 #endif
5967 .align 32
5968 1: ldx [%g6 + TI_FLAGS], %l5
5969- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
5970+ andcc %l5, _TIF_WORK_SYSCALL, %g0
5971 be,pt %icc, rtrap
5972 nop
5973 call syscall_trace_leave
5974@@ -179,7 +179,7 @@ linux_sparc_syscall32:
5975
5976 srl %i5, 0, %o5 ! IEU1
5977 srl %i2, 0, %o2 ! IEU0 Group
5978- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
5979+ andcc %l0, _TIF_WORK_SYSCALL, %g0
5980 bne,pn %icc, linux_syscall_trace32 ! CTI
5981 mov %i0, %l5 ! IEU1
5982 call %l7 ! CTI Group brk forced
5983@@ -202,7 +202,7 @@ linux_sparc_syscall:
5984
5985 mov %i3, %o3 ! IEU1
5986 mov %i4, %o4 ! IEU0 Group
5987- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
5988+ andcc %l0, _TIF_WORK_SYSCALL, %g0
5989 bne,pn %icc, linux_syscall_trace ! CTI Group
5990 mov %i0, %l5 ! IEU0
5991 2: call %l7 ! CTI Group brk forced
5992@@ -226,7 +226,7 @@ ret_sys_call:
5993
5994 cmp %o0, -ERESTART_RESTARTBLOCK
5995 bgeu,pn %xcc, 1f
5996- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
5997+ andcc %l0, _TIF_WORK_SYSCALL, %l6
5998 80:
5999 /* System call success, clear Carry condition code. */
6000 andn %g3, %g2, %g3
6001@@ -241,7 +241,7 @@ ret_sys_call:
6002 /* System call failure, set Carry condition code.
6003 * Also, get abs(errno) to return to the process.
6004 */
6005- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
6006+ andcc %l0, _TIF_WORK_SYSCALL, %l6
6007 sub %g0, %o0, %o0
6008 or %g3, %g2, %g3
6009 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
6010diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
6011index d2de213..6b22bc3 100644
6012--- a/arch/sparc/kernel/traps_32.c
6013+++ b/arch/sparc/kernel/traps_32.c
6014@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
6015 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
6016 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
6017
6018+extern void gr_handle_kernel_exploit(void);
6019+
6020 void die_if_kernel(char *str, struct pt_regs *regs)
6021 {
6022 static int die_counter;
6023@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6024 count++ < 30 &&
6025 (((unsigned long) rw) >= PAGE_OFFSET) &&
6026 !(((unsigned long) rw) & 0x7)) {
6027- printk("Caller[%08lx]: %pS\n", rw->ins[7],
6028+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
6029 (void *) rw->ins[7]);
6030 rw = (struct reg_window32 *)rw->ins[6];
6031 }
6032 }
6033 printk("Instruction DUMP:");
6034 instruction_dump ((unsigned long *) regs->pc);
6035- if(regs->psr & PSR_PS)
6036+ if(regs->psr & PSR_PS) {
6037+ gr_handle_kernel_exploit();
6038 do_exit(SIGKILL);
6039+ }
6040 do_exit(SIGSEGV);
6041 }
6042
6043diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
6044index c72fdf5..743a344 100644
6045--- a/arch/sparc/kernel/traps_64.c
6046+++ b/arch/sparc/kernel/traps_64.c
6047@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
6048 i + 1,
6049 p->trapstack[i].tstate, p->trapstack[i].tpc,
6050 p->trapstack[i].tnpc, p->trapstack[i].tt);
6051- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
6052+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
6053 }
6054 }
6055
6056@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
6057
6058 lvl -= 0x100;
6059 if (regs->tstate & TSTATE_PRIV) {
6060+
6061+#ifdef CONFIG_PAX_REFCOUNT
6062+ if (lvl == 6)
6063+ pax_report_refcount_overflow(regs);
6064+#endif
6065+
6066 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
6067 die_if_kernel(buffer, regs);
6068 }
6069@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
6070 void bad_trap_tl1(struct pt_regs *regs, long lvl)
6071 {
6072 char buffer[32];
6073-
6074+
6075 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
6076 0, lvl, SIGTRAP) == NOTIFY_STOP)
6077 return;
6078
6079+#ifdef CONFIG_PAX_REFCOUNT
6080+ if (lvl == 6)
6081+ pax_report_refcount_overflow(regs);
6082+#endif
6083+
6084 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6085
6086 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
6087@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
6088 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
6089 printk("%s" "ERROR(%d): ",
6090 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
6091- printk("TPC<%pS>\n", (void *) regs->tpc);
6092+ printk("TPC<%pA>\n", (void *) regs->tpc);
6093 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
6094 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
6095 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
6096@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6097 smp_processor_id(),
6098 (type & 0x1) ? 'I' : 'D',
6099 regs->tpc);
6100- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
6101+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
6102 panic("Irrecoverable Cheetah+ parity error.");
6103 }
6104
6105@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6106 smp_processor_id(),
6107 (type & 0x1) ? 'I' : 'D',
6108 regs->tpc);
6109- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
6110+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
6111 }
6112
6113 struct sun4v_error_entry {
6114@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
6115
6116 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
6117 regs->tpc, tl);
6118- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
6119+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
6120 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6121- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
6122+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
6123 (void *) regs->u_regs[UREG_I7]);
6124 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
6125 "pte[%lx] error[%lx]\n",
6126@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
6127
6128 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
6129 regs->tpc, tl);
6130- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
6131+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
6132 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6133- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
6134+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
6135 (void *) regs->u_regs[UREG_I7]);
6136 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
6137 "pte[%lx] error[%lx]\n",
6138@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6139 fp = (unsigned long)sf->fp + STACK_BIAS;
6140 }
6141
6142- printk(" [%016lx] %pS\n", pc, (void *) pc);
6143+ printk(" [%016lx] %pA\n", pc, (void *) pc);
6144 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6145 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
6146 int index = tsk->curr_ret_stack;
6147 if (tsk->ret_stack && index >= graph) {
6148 pc = tsk->ret_stack[index - graph].ret;
6149- printk(" [%016lx] %pS\n", pc, (void *) pc);
6150+ printk(" [%016lx] %pA\n", pc, (void *) pc);
6151 graph++;
6152 }
6153 }
6154@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
6155 return (struct reg_window *) (fp + STACK_BIAS);
6156 }
6157
6158+extern void gr_handle_kernel_exploit(void);
6159+
6160 void die_if_kernel(char *str, struct pt_regs *regs)
6161 {
6162 static int die_counter;
6163@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6164 while (rw &&
6165 count++ < 30 &&
6166 kstack_valid(tp, (unsigned long) rw)) {
6167- printk("Caller[%016lx]: %pS\n", rw->ins[7],
6168+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
6169 (void *) rw->ins[7]);
6170
6171 rw = kernel_stack_up(rw);
6172@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6173 }
6174 user_instruction_dump ((unsigned int __user *) regs->tpc);
6175 }
6176- if (regs->tstate & TSTATE_PRIV)
6177+ if (regs->tstate & TSTATE_PRIV) {
6178+ gr_handle_kernel_exploit();
6179 do_exit(SIGKILL);
6180+ }
6181 do_exit(SIGSEGV);
6182 }
6183 EXPORT_SYMBOL(die_if_kernel);
6184diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
6185index dae85bc..af1e19d 100644
6186--- a/arch/sparc/kernel/unaligned_64.c
6187+++ b/arch/sparc/kernel/unaligned_64.c
6188@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
6189 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
6190
6191 if (__ratelimit(&ratelimit)) {
6192- printk("Kernel unaligned access at TPC[%lx] %pS\n",
6193+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
6194 regs->tpc, (void *) regs->tpc);
6195 }
6196 }
6197diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
6198index a3fc437..fea9957 100644
6199--- a/arch/sparc/lib/Makefile
6200+++ b/arch/sparc/lib/Makefile
6201@@ -2,7 +2,7 @@
6202 #
6203
6204 asflags-y := -ansi -DST_DIV0=0x02
6205-ccflags-y := -Werror
6206+#ccflags-y := -Werror
6207
6208 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
6209 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
6210diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
6211index 59186e0..f747d7a 100644
6212--- a/arch/sparc/lib/atomic_64.S
6213+++ b/arch/sparc/lib/atomic_64.S
6214@@ -18,7 +18,12 @@
6215 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6216 BACKOFF_SETUP(%o2)
6217 1: lduw [%o1], %g1
6218- add %g1, %o0, %g7
6219+ addcc %g1, %o0, %g7
6220+
6221+#ifdef CONFIG_PAX_REFCOUNT
6222+ tvs %icc, 6
6223+#endif
6224+
6225 cas [%o1], %g1, %g7
6226 cmp %g1, %g7
6227 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6228@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6229 2: BACKOFF_SPIN(%o2, %o3, 1b)
6230 .size atomic_add, .-atomic_add
6231
6232+ .globl atomic_add_unchecked
6233+ .type atomic_add_unchecked,#function
6234+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6235+ BACKOFF_SETUP(%o2)
6236+1: lduw [%o1], %g1
6237+ add %g1, %o0, %g7
6238+ cas [%o1], %g1, %g7
6239+ cmp %g1, %g7
6240+ bne,pn %icc, 2f
6241+ nop
6242+ retl
6243+ nop
6244+2: BACKOFF_SPIN(%o2, %o3, 1b)
6245+ .size atomic_add_unchecked, .-atomic_add_unchecked
6246+
6247 .globl atomic_sub
6248 .type atomic_sub,#function
6249 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6250 BACKOFF_SETUP(%o2)
6251 1: lduw [%o1], %g1
6252- sub %g1, %o0, %g7
6253+ subcc %g1, %o0, %g7
6254+
6255+#ifdef CONFIG_PAX_REFCOUNT
6256+ tvs %icc, 6
6257+#endif
6258+
6259 cas [%o1], %g1, %g7
6260 cmp %g1, %g7
6261 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6262@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6263 2: BACKOFF_SPIN(%o2, %o3, 1b)
6264 .size atomic_sub, .-atomic_sub
6265
6266+ .globl atomic_sub_unchecked
6267+ .type atomic_sub_unchecked,#function
6268+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6269+ BACKOFF_SETUP(%o2)
6270+1: lduw [%o1], %g1
6271+ sub %g1, %o0, %g7
6272+ cas [%o1], %g1, %g7
6273+ cmp %g1, %g7
6274+ bne,pn %icc, 2f
6275+ nop
6276+ retl
6277+ nop
6278+2: BACKOFF_SPIN(%o2, %o3, 1b)
6279+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
6280+
6281 .globl atomic_add_ret
6282 .type atomic_add_ret,#function
6283 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6284 BACKOFF_SETUP(%o2)
6285 1: lduw [%o1], %g1
6286- add %g1, %o0, %g7
6287+ addcc %g1, %o0, %g7
6288+
6289+#ifdef CONFIG_PAX_REFCOUNT
6290+ tvs %icc, 6
6291+#endif
6292+
6293 cas [%o1], %g1, %g7
6294 cmp %g1, %g7
6295 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6296@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6297 2: BACKOFF_SPIN(%o2, %o3, 1b)
6298 .size atomic_add_ret, .-atomic_add_ret
6299
6300+ .globl atomic_add_ret_unchecked
6301+ .type atomic_add_ret_unchecked,#function
6302+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6303+ BACKOFF_SETUP(%o2)
6304+1: lduw [%o1], %g1
6305+ addcc %g1, %o0, %g7
6306+ cas [%o1], %g1, %g7
6307+ cmp %g1, %g7
6308+ bne,pn %icc, 2f
6309+ add %g7, %o0, %g7
6310+ sra %g7, 0, %o0
6311+ retl
6312+ nop
6313+2: BACKOFF_SPIN(%o2, %o3, 1b)
6314+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
6315+
6316 .globl atomic_sub_ret
6317 .type atomic_sub_ret,#function
6318 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6319 BACKOFF_SETUP(%o2)
6320 1: lduw [%o1], %g1
6321- sub %g1, %o0, %g7
6322+ subcc %g1, %o0, %g7
6323+
6324+#ifdef CONFIG_PAX_REFCOUNT
6325+ tvs %icc, 6
6326+#endif
6327+
6328 cas [%o1], %g1, %g7
6329 cmp %g1, %g7
6330 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6331@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6332 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6333 BACKOFF_SETUP(%o2)
6334 1: ldx [%o1], %g1
6335- add %g1, %o0, %g7
6336+ addcc %g1, %o0, %g7
6337+
6338+#ifdef CONFIG_PAX_REFCOUNT
6339+ tvs %xcc, 6
6340+#endif
6341+
6342 casx [%o1], %g1, %g7
6343 cmp %g1, %g7
6344 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6345@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6346 2: BACKOFF_SPIN(%o2, %o3, 1b)
6347 .size atomic64_add, .-atomic64_add
6348
6349+ .globl atomic64_add_unchecked
6350+ .type atomic64_add_unchecked,#function
6351+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6352+ BACKOFF_SETUP(%o2)
6353+1: ldx [%o1], %g1
6354+ addcc %g1, %o0, %g7
6355+ casx [%o1], %g1, %g7
6356+ cmp %g1, %g7
6357+ bne,pn %xcc, 2f
6358+ nop
6359+ retl
6360+ nop
6361+2: BACKOFF_SPIN(%o2, %o3, 1b)
6362+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
6363+
6364 .globl atomic64_sub
6365 .type atomic64_sub,#function
6366 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6367 BACKOFF_SETUP(%o2)
6368 1: ldx [%o1], %g1
6369- sub %g1, %o0, %g7
6370+ subcc %g1, %o0, %g7
6371+
6372+#ifdef CONFIG_PAX_REFCOUNT
6373+ tvs %xcc, 6
6374+#endif
6375+
6376 casx [%o1], %g1, %g7
6377 cmp %g1, %g7
6378 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6379@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6380 2: BACKOFF_SPIN(%o2, %o3, 1b)
6381 .size atomic64_sub, .-atomic64_sub
6382
6383+ .globl atomic64_sub_unchecked
6384+ .type atomic64_sub_unchecked,#function
6385+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6386+ BACKOFF_SETUP(%o2)
6387+1: ldx [%o1], %g1
6388+ subcc %g1, %o0, %g7
6389+ casx [%o1], %g1, %g7
6390+ cmp %g1, %g7
6391+ bne,pn %xcc, 2f
6392+ nop
6393+ retl
6394+ nop
6395+2: BACKOFF_SPIN(%o2, %o3, 1b)
6396+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
6397+
6398 .globl atomic64_add_ret
6399 .type atomic64_add_ret,#function
6400 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6401 BACKOFF_SETUP(%o2)
6402 1: ldx [%o1], %g1
6403- add %g1, %o0, %g7
6404+ addcc %g1, %o0, %g7
6405+
6406+#ifdef CONFIG_PAX_REFCOUNT
6407+ tvs %xcc, 6
6408+#endif
6409+
6410 casx [%o1], %g1, %g7
6411 cmp %g1, %g7
6412 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6413@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6414 2: BACKOFF_SPIN(%o2, %o3, 1b)
6415 .size atomic64_add_ret, .-atomic64_add_ret
6416
6417+ .globl atomic64_add_ret_unchecked
6418+ .type atomic64_add_ret_unchecked,#function
6419+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6420+ BACKOFF_SETUP(%o2)
6421+1: ldx [%o1], %g1
6422+ addcc %g1, %o0, %g7
6423+ casx [%o1], %g1, %g7
6424+ cmp %g1, %g7
6425+ bne,pn %xcc, 2f
6426+ add %g7, %o0, %g7
6427+ mov %g7, %o0
6428+ retl
6429+ nop
6430+2: BACKOFF_SPIN(%o2, %o3, 1b)
6431+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
6432+
6433 .globl atomic64_sub_ret
6434 .type atomic64_sub_ret,#function
6435 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6436 BACKOFF_SETUP(%o2)
6437 1: ldx [%o1], %g1
6438- sub %g1, %o0, %g7
6439+ subcc %g1, %o0, %g7
6440+
6441+#ifdef CONFIG_PAX_REFCOUNT
6442+ tvs %xcc, 6
6443+#endif
6444+
6445 casx [%o1], %g1, %g7
6446 cmp %g1, %g7
6447 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6448diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
6449index f73c224..662af10 100644
6450--- a/arch/sparc/lib/ksyms.c
6451+++ b/arch/sparc/lib/ksyms.c
6452@@ -136,12 +136,18 @@ EXPORT_SYMBOL(__downgrade_write);
6453
6454 /* Atomic counter implementation. */
6455 EXPORT_SYMBOL(atomic_add);
6456+EXPORT_SYMBOL(atomic_add_unchecked);
6457 EXPORT_SYMBOL(atomic_add_ret);
6458+EXPORT_SYMBOL(atomic_add_ret_unchecked);
6459 EXPORT_SYMBOL(atomic_sub);
6460+EXPORT_SYMBOL(atomic_sub_unchecked);
6461 EXPORT_SYMBOL(atomic_sub_ret);
6462 EXPORT_SYMBOL(atomic64_add);
6463+EXPORT_SYMBOL(atomic64_add_unchecked);
6464 EXPORT_SYMBOL(atomic64_add_ret);
6465+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
6466 EXPORT_SYMBOL(atomic64_sub);
6467+EXPORT_SYMBOL(atomic64_sub_unchecked);
6468 EXPORT_SYMBOL(atomic64_sub_ret);
6469
6470 /* Atomic bit operations. */
6471diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6472index 301421c..e2535d1 100644
6473--- a/arch/sparc/mm/Makefile
6474+++ b/arch/sparc/mm/Makefile
6475@@ -2,7 +2,7 @@
6476 #
6477
6478 asflags-y := -ansi
6479-ccflags-y := -Werror
6480+#ccflags-y := -Werror
6481
6482 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
6483 obj-y += fault_$(BITS).o
6484diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6485index df3155a..eb708b8 100644
6486--- a/arch/sparc/mm/fault_32.c
6487+++ b/arch/sparc/mm/fault_32.c
6488@@ -21,6 +21,9 @@
6489 #include <linux/perf_event.h>
6490 #include <linux/interrupt.h>
6491 #include <linux/kdebug.h>
6492+#include <linux/slab.h>
6493+#include <linux/pagemap.h>
6494+#include <linux/compiler.h>
6495
6496 #include <asm/page.h>
6497 #include <asm/pgtable.h>
6498@@ -207,6 +210,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6499 return safe_compute_effective_address(regs, insn);
6500 }
6501
6502+#ifdef CONFIG_PAX_PAGEEXEC
6503+#ifdef CONFIG_PAX_DLRESOLVE
6504+static void pax_emuplt_close(struct vm_area_struct *vma)
6505+{
6506+ vma->vm_mm->call_dl_resolve = 0UL;
6507+}
6508+
6509+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6510+{
6511+ unsigned int *kaddr;
6512+
6513+ vmf->page = alloc_page(GFP_HIGHUSER);
6514+ if (!vmf->page)
6515+ return VM_FAULT_OOM;
6516+
6517+ kaddr = kmap(vmf->page);
6518+ memset(kaddr, 0, PAGE_SIZE);
6519+ kaddr[0] = 0x9DE3BFA8U; /* save */
6520+ flush_dcache_page(vmf->page);
6521+ kunmap(vmf->page);
6522+ return VM_FAULT_MAJOR;
6523+}
6524+
6525+static const struct vm_operations_struct pax_vm_ops = {
6526+ .close = pax_emuplt_close,
6527+ .fault = pax_emuplt_fault
6528+};
6529+
6530+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6531+{
6532+ int ret;
6533+
6534+ INIT_LIST_HEAD(&vma->anon_vma_chain);
6535+ vma->vm_mm = current->mm;
6536+ vma->vm_start = addr;
6537+ vma->vm_end = addr + PAGE_SIZE;
6538+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6539+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6540+ vma->vm_ops = &pax_vm_ops;
6541+
6542+ ret = insert_vm_struct(current->mm, vma);
6543+ if (ret)
6544+ return ret;
6545+
6546+ ++current->mm->total_vm;
6547+ return 0;
6548+}
6549+#endif
6550+
6551+/*
6552+ * PaX: decide what to do with offenders (regs->pc = fault address)
6553+ *
6554+ * returns 1 when task should be killed
6555+ * 2 when patched PLT trampoline was detected
6556+ * 3 when unpatched PLT trampoline was detected
6557+ */
6558+static int pax_handle_fetch_fault(struct pt_regs *regs)
6559+{
6560+
6561+#ifdef CONFIG_PAX_EMUPLT
6562+ int err;
6563+
6564+ do { /* PaX: patched PLT emulation #1 */
6565+ unsigned int sethi1, sethi2, jmpl;
6566+
6567+ err = get_user(sethi1, (unsigned int *)regs->pc);
6568+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6569+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6570+
6571+ if (err)
6572+ break;
6573+
6574+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6575+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6576+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6577+ {
6578+ unsigned int addr;
6579+
6580+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6581+ addr = regs->u_regs[UREG_G1];
6582+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6583+ regs->pc = addr;
6584+ regs->npc = addr+4;
6585+ return 2;
6586+ }
6587+ } while (0);
6588+
6589+ { /* PaX: patched PLT emulation #2 */
6590+ unsigned int ba;
6591+
6592+ err = get_user(ba, (unsigned int *)regs->pc);
6593+
6594+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6595+ unsigned int addr;
6596+
6597+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6598+ regs->pc = addr;
6599+ regs->npc = addr+4;
6600+ return 2;
6601+ }
6602+ }
6603+
6604+ do { /* PaX: patched PLT emulation #3 */
6605+ unsigned int sethi, jmpl, nop;
6606+
6607+ err = get_user(sethi, (unsigned int *)regs->pc);
6608+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6609+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6610+
6611+ if (err)
6612+ break;
6613+
6614+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6615+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6616+ nop == 0x01000000U)
6617+ {
6618+ unsigned int addr;
6619+
6620+ addr = (sethi & 0x003FFFFFU) << 10;
6621+ regs->u_regs[UREG_G1] = addr;
6622+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6623+ regs->pc = addr;
6624+ regs->npc = addr+4;
6625+ return 2;
6626+ }
6627+ } while (0);
6628+
6629+ do { /* PaX: unpatched PLT emulation step 1 */
6630+ unsigned int sethi, ba, nop;
6631+
6632+ err = get_user(sethi, (unsigned int *)regs->pc);
6633+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
6634+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6635+
6636+ if (err)
6637+ break;
6638+
6639+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6640+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6641+ nop == 0x01000000U)
6642+ {
6643+ unsigned int addr, save, call;
6644+
6645+ if ((ba & 0xFFC00000U) == 0x30800000U)
6646+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6647+ else
6648+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6649+
6650+ err = get_user(save, (unsigned int *)addr);
6651+ err |= get_user(call, (unsigned int *)(addr+4));
6652+ err |= get_user(nop, (unsigned int *)(addr+8));
6653+ if (err)
6654+ break;
6655+
6656+#ifdef CONFIG_PAX_DLRESOLVE
6657+ if (save == 0x9DE3BFA8U &&
6658+ (call & 0xC0000000U) == 0x40000000U &&
6659+ nop == 0x01000000U)
6660+ {
6661+ struct vm_area_struct *vma;
6662+ unsigned long call_dl_resolve;
6663+
6664+ down_read(&current->mm->mmap_sem);
6665+ call_dl_resolve = current->mm->call_dl_resolve;
6666+ up_read(&current->mm->mmap_sem);
6667+ if (likely(call_dl_resolve))
6668+ goto emulate;
6669+
6670+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6671+
6672+ down_write(&current->mm->mmap_sem);
6673+ if (current->mm->call_dl_resolve) {
6674+ call_dl_resolve = current->mm->call_dl_resolve;
6675+ up_write(&current->mm->mmap_sem);
6676+ if (vma)
6677+ kmem_cache_free(vm_area_cachep, vma);
6678+ goto emulate;
6679+ }
6680+
6681+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6682+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6683+ up_write(&current->mm->mmap_sem);
6684+ if (vma)
6685+ kmem_cache_free(vm_area_cachep, vma);
6686+ return 1;
6687+ }
6688+
6689+ if (pax_insert_vma(vma, call_dl_resolve)) {
6690+ up_write(&current->mm->mmap_sem);
6691+ kmem_cache_free(vm_area_cachep, vma);
6692+ return 1;
6693+ }
6694+
6695+ current->mm->call_dl_resolve = call_dl_resolve;
6696+ up_write(&current->mm->mmap_sem);
6697+
6698+emulate:
6699+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6700+ regs->pc = call_dl_resolve;
6701+ regs->npc = addr+4;
6702+ return 3;
6703+ }
6704+#endif
6705+
6706+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6707+ if ((save & 0xFFC00000U) == 0x05000000U &&
6708+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6709+ nop == 0x01000000U)
6710+ {
6711+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6712+ regs->u_regs[UREG_G2] = addr + 4;
6713+ addr = (save & 0x003FFFFFU) << 10;
6714+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6715+ regs->pc = addr;
6716+ regs->npc = addr+4;
6717+ return 3;
6718+ }
6719+ }
6720+ } while (0);
6721+
6722+ do { /* PaX: unpatched PLT emulation step 2 */
6723+ unsigned int save, call, nop;
6724+
6725+ err = get_user(save, (unsigned int *)(regs->pc-4));
6726+ err |= get_user(call, (unsigned int *)regs->pc);
6727+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
6728+ if (err)
6729+ break;
6730+
6731+ if (save == 0x9DE3BFA8U &&
6732+ (call & 0xC0000000U) == 0x40000000U &&
6733+ nop == 0x01000000U)
6734+ {
6735+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6736+
6737+ regs->u_regs[UREG_RETPC] = regs->pc;
6738+ regs->pc = dl_resolve;
6739+ regs->npc = dl_resolve+4;
6740+ return 3;
6741+ }
6742+ } while (0);
6743+#endif
6744+
6745+ return 1;
6746+}
6747+
6748+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6749+{
6750+ unsigned long i;
6751+
6752+ printk(KERN_ERR "PAX: bytes at PC: ");
6753+ for (i = 0; i < 8; i++) {
6754+ unsigned int c;
6755+ if (get_user(c, (unsigned int *)pc+i))
6756+ printk(KERN_CONT "???????? ");
6757+ else
6758+ printk(KERN_CONT "%08x ", c);
6759+ }
6760+ printk("\n");
6761+}
6762+#endif
6763+
6764 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6765 int text_fault)
6766 {
6767@@ -282,6 +547,24 @@ good_area:
6768 if(!(vma->vm_flags & VM_WRITE))
6769 goto bad_area;
6770 } else {
6771+
6772+#ifdef CONFIG_PAX_PAGEEXEC
6773+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6774+ up_read(&mm->mmap_sem);
6775+ switch (pax_handle_fetch_fault(regs)) {
6776+
6777+#ifdef CONFIG_PAX_EMUPLT
6778+ case 2:
6779+ case 3:
6780+ return;
6781+#endif
6782+
6783+ }
6784+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6785+ do_group_exit(SIGKILL);
6786+ }
6787+#endif
6788+
6789 /* Allow reads even for write-only mappings */
6790 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6791 goto bad_area;
6792diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6793index 1fe0429..aee2e87 100644
6794--- a/arch/sparc/mm/fault_64.c
6795+++ b/arch/sparc/mm/fault_64.c
6796@@ -21,6 +21,9 @@
6797 #include <linux/kprobes.h>
6798 #include <linux/kdebug.h>
6799 #include <linux/percpu.h>
6800+#include <linux/slab.h>
6801+#include <linux/pagemap.h>
6802+#include <linux/compiler.h>
6803
6804 #include <asm/page.h>
6805 #include <asm/pgtable.h>
6806@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6807 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6808 regs->tpc);
6809 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6810- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6811+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6812 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6813 dump_stack();
6814 unhandled_fault(regs->tpc, current, regs);
6815@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6816 show_regs(regs);
6817 }
6818
6819+#ifdef CONFIG_PAX_PAGEEXEC
6820+#ifdef CONFIG_PAX_DLRESOLVE
6821+static void pax_emuplt_close(struct vm_area_struct *vma)
6822+{
6823+ vma->vm_mm->call_dl_resolve = 0UL;
6824+}
6825+
6826+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6827+{
6828+ unsigned int *kaddr;
6829+
6830+ vmf->page = alloc_page(GFP_HIGHUSER);
6831+ if (!vmf->page)
6832+ return VM_FAULT_OOM;
6833+
6834+ kaddr = kmap(vmf->page);
6835+ memset(kaddr, 0, PAGE_SIZE);
6836+ kaddr[0] = 0x9DE3BFA8U; /* save */
6837+ flush_dcache_page(vmf->page);
6838+ kunmap(vmf->page);
6839+ return VM_FAULT_MAJOR;
6840+}
6841+
6842+static const struct vm_operations_struct pax_vm_ops = {
6843+ .close = pax_emuplt_close,
6844+ .fault = pax_emuplt_fault
6845+};
6846+
6847+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6848+{
6849+ int ret;
6850+
6851+ INIT_LIST_HEAD(&vma->anon_vma_chain);
6852+ vma->vm_mm = current->mm;
6853+ vma->vm_start = addr;
6854+ vma->vm_end = addr + PAGE_SIZE;
6855+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6856+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6857+ vma->vm_ops = &pax_vm_ops;
6858+
6859+ ret = insert_vm_struct(current->mm, vma);
6860+ if (ret)
6861+ return ret;
6862+
6863+ ++current->mm->total_vm;
6864+ return 0;
6865+}
6866+#endif
6867+
6868+/*
6869+ * PaX: decide what to do with offenders (regs->tpc = fault address)
6870+ *
6871+ * returns 1 when task should be killed
6872+ * 2 when patched PLT trampoline was detected
6873+ * 3 when unpatched PLT trampoline was detected
6874+ */
6875+static int pax_handle_fetch_fault(struct pt_regs *regs)
6876+{
6877+
6878+#ifdef CONFIG_PAX_EMUPLT
6879+ int err;
6880+
6881+ do { /* PaX: patched PLT emulation #1 */
6882+ unsigned int sethi1, sethi2, jmpl;
6883+
6884+ err = get_user(sethi1, (unsigned int *)regs->tpc);
6885+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6886+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6887+
6888+ if (err)
6889+ break;
6890+
6891+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6892+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6893+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6894+ {
6895+ unsigned long addr;
6896+
6897+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6898+ addr = regs->u_regs[UREG_G1];
6899+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6900+
6901+ if (test_thread_flag(TIF_32BIT))
6902+ addr &= 0xFFFFFFFFUL;
6903+
6904+ regs->tpc = addr;
6905+ regs->tnpc = addr+4;
6906+ return 2;
6907+ }
6908+ } while (0);
6909+
6910+ { /* PaX: patched PLT emulation #2 */
6911+ unsigned int ba;
6912+
6913+ err = get_user(ba, (unsigned int *)regs->tpc);
6914+
6915+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6916+ unsigned long addr;
6917+
6918+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6919+
6920+ if (test_thread_flag(TIF_32BIT))
6921+ addr &= 0xFFFFFFFFUL;
6922+
6923+ regs->tpc = addr;
6924+ regs->tnpc = addr+4;
6925+ return 2;
6926+ }
6927+ }
6928+
6929+ do { /* PaX: patched PLT emulation #3 */
6930+ unsigned int sethi, jmpl, nop;
6931+
6932+ err = get_user(sethi, (unsigned int *)regs->tpc);
6933+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6934+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6935+
6936+ if (err)
6937+ break;
6938+
6939+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6940+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6941+ nop == 0x01000000U)
6942+ {
6943+ unsigned long addr;
6944+
6945+ addr = (sethi & 0x003FFFFFU) << 10;
6946+ regs->u_regs[UREG_G1] = addr;
6947+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6948+
6949+ if (test_thread_flag(TIF_32BIT))
6950+ addr &= 0xFFFFFFFFUL;
6951+
6952+ regs->tpc = addr;
6953+ regs->tnpc = addr+4;
6954+ return 2;
6955+ }
6956+ } while (0);
6957+
6958+ do { /* PaX: patched PLT emulation #4 */
6959+ unsigned int sethi, mov1, call, mov2;
6960+
6961+ err = get_user(sethi, (unsigned int *)regs->tpc);
6962+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6963+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
6964+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6965+
6966+ if (err)
6967+ break;
6968+
6969+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6970+ mov1 == 0x8210000FU &&
6971+ (call & 0xC0000000U) == 0x40000000U &&
6972+ mov2 == 0x9E100001U)
6973+ {
6974+ unsigned long addr;
6975+
6976+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6977+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6978+
6979+ if (test_thread_flag(TIF_32BIT))
6980+ addr &= 0xFFFFFFFFUL;
6981+
6982+ regs->tpc = addr;
6983+ regs->tnpc = addr+4;
6984+ return 2;
6985+ }
6986+ } while (0);
6987+
6988+ do { /* PaX: patched PLT emulation #5 */
6989+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6990+
6991+ err = get_user(sethi, (unsigned int *)regs->tpc);
6992+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6993+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6994+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6995+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6996+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6997+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6998+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6999+
7000+ if (err)
7001+ break;
7002+
7003+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7004+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
7005+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7006+ (or1 & 0xFFFFE000U) == 0x82106000U &&
7007+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
7008+ sllx == 0x83287020U &&
7009+ jmpl == 0x81C04005U &&
7010+ nop == 0x01000000U)
7011+ {
7012+ unsigned long addr;
7013+
7014+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7015+ regs->u_regs[UREG_G1] <<= 32;
7016+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7017+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7018+ regs->tpc = addr;
7019+ regs->tnpc = addr+4;
7020+ return 2;
7021+ }
7022+ } while (0);
7023+
7024+ do { /* PaX: patched PLT emulation #6 */
7025+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
7026+
7027+ err = get_user(sethi, (unsigned int *)regs->tpc);
7028+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7029+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7030+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
7031+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
7032+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
7033+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
7034+
7035+ if (err)
7036+ break;
7037+
7038+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7039+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
7040+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7041+ sllx == 0x83287020U &&
7042+ (or & 0xFFFFE000U) == 0x8A116000U &&
7043+ jmpl == 0x81C04005U &&
7044+ nop == 0x01000000U)
7045+ {
7046+ unsigned long addr;
7047+
7048+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
7049+ regs->u_regs[UREG_G1] <<= 32;
7050+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
7051+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7052+ regs->tpc = addr;
7053+ regs->tnpc = addr+4;
7054+ return 2;
7055+ }
7056+ } while (0);
7057+
7058+ do { /* PaX: unpatched PLT emulation step 1 */
7059+ unsigned int sethi, ba, nop;
7060+
7061+ err = get_user(sethi, (unsigned int *)regs->tpc);
7062+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7063+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7064+
7065+ if (err)
7066+ break;
7067+
7068+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7069+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7070+ nop == 0x01000000U)
7071+ {
7072+ unsigned long addr;
7073+ unsigned int save, call;
7074+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
7075+
7076+ if ((ba & 0xFFC00000U) == 0x30800000U)
7077+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7078+ else
7079+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7080+
7081+ if (test_thread_flag(TIF_32BIT))
7082+ addr &= 0xFFFFFFFFUL;
7083+
7084+ err = get_user(save, (unsigned int *)addr);
7085+ err |= get_user(call, (unsigned int *)(addr+4));
7086+ err |= get_user(nop, (unsigned int *)(addr+8));
7087+ if (err)
7088+ break;
7089+
7090+#ifdef CONFIG_PAX_DLRESOLVE
7091+ if (save == 0x9DE3BFA8U &&
7092+ (call & 0xC0000000U) == 0x40000000U &&
7093+ nop == 0x01000000U)
7094+ {
7095+ struct vm_area_struct *vma;
7096+ unsigned long call_dl_resolve;
7097+
7098+ down_read(&current->mm->mmap_sem);
7099+ call_dl_resolve = current->mm->call_dl_resolve;
7100+ up_read(&current->mm->mmap_sem);
7101+ if (likely(call_dl_resolve))
7102+ goto emulate;
7103+
7104+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7105+
7106+ down_write(&current->mm->mmap_sem);
7107+ if (current->mm->call_dl_resolve) {
7108+ call_dl_resolve = current->mm->call_dl_resolve;
7109+ up_write(&current->mm->mmap_sem);
7110+ if (vma)
7111+ kmem_cache_free(vm_area_cachep, vma);
7112+ goto emulate;
7113+ }
7114+
7115+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7116+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7117+ up_write(&current->mm->mmap_sem);
7118+ if (vma)
7119+ kmem_cache_free(vm_area_cachep, vma);
7120+ return 1;
7121+ }
7122+
7123+ if (pax_insert_vma(vma, call_dl_resolve)) {
7124+ up_write(&current->mm->mmap_sem);
7125+ kmem_cache_free(vm_area_cachep, vma);
7126+ return 1;
7127+ }
7128+
7129+ current->mm->call_dl_resolve = call_dl_resolve;
7130+ up_write(&current->mm->mmap_sem);
7131+
7132+emulate:
7133+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7134+ regs->tpc = call_dl_resolve;
7135+ regs->tnpc = addr+4;
7136+ return 3;
7137+ }
7138+#endif
7139+
7140+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7141+ if ((save & 0xFFC00000U) == 0x05000000U &&
7142+ (call & 0xFFFFE000U) == 0x85C0A000U &&
7143+ nop == 0x01000000U)
7144+ {
7145+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7146+ regs->u_regs[UREG_G2] = addr + 4;
7147+ addr = (save & 0x003FFFFFU) << 10;
7148+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7149+
7150+ if (test_thread_flag(TIF_32BIT))
7151+ addr &= 0xFFFFFFFFUL;
7152+
7153+ regs->tpc = addr;
7154+ regs->tnpc = addr+4;
7155+ return 3;
7156+ }
7157+
7158+ /* PaX: 64-bit PLT stub */
7159+ err = get_user(sethi1, (unsigned int *)addr);
7160+ err |= get_user(sethi2, (unsigned int *)(addr+4));
7161+ err |= get_user(or1, (unsigned int *)(addr+8));
7162+ err |= get_user(or2, (unsigned int *)(addr+12));
7163+ err |= get_user(sllx, (unsigned int *)(addr+16));
7164+ err |= get_user(add, (unsigned int *)(addr+20));
7165+ err |= get_user(jmpl, (unsigned int *)(addr+24));
7166+ err |= get_user(nop, (unsigned int *)(addr+28));
7167+ if (err)
7168+ break;
7169+
7170+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
7171+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7172+ (or1 & 0xFFFFE000U) == 0x88112000U &&
7173+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
7174+ sllx == 0x89293020U &&
7175+ add == 0x8A010005U &&
7176+ jmpl == 0x89C14000U &&
7177+ nop == 0x01000000U)
7178+ {
7179+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7180+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7181+ regs->u_regs[UREG_G4] <<= 32;
7182+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7183+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
7184+ regs->u_regs[UREG_G4] = addr + 24;
7185+ addr = regs->u_regs[UREG_G5];
7186+ regs->tpc = addr;
7187+ regs->tnpc = addr+4;
7188+ return 3;
7189+ }
7190+ }
7191+ } while (0);
7192+
7193+#ifdef CONFIG_PAX_DLRESOLVE
7194+ do { /* PaX: unpatched PLT emulation step 2 */
7195+ unsigned int save, call, nop;
7196+
7197+ err = get_user(save, (unsigned int *)(regs->tpc-4));
7198+ err |= get_user(call, (unsigned int *)regs->tpc);
7199+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
7200+ if (err)
7201+ break;
7202+
7203+ if (save == 0x9DE3BFA8U &&
7204+ (call & 0xC0000000U) == 0x40000000U &&
7205+ nop == 0x01000000U)
7206+ {
7207+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7208+
7209+ if (test_thread_flag(TIF_32BIT))
7210+ dl_resolve &= 0xFFFFFFFFUL;
7211+
7212+ regs->u_regs[UREG_RETPC] = regs->tpc;
7213+ regs->tpc = dl_resolve;
7214+ regs->tnpc = dl_resolve+4;
7215+ return 3;
7216+ }
7217+ } while (0);
7218+#endif
7219+
7220+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
7221+ unsigned int sethi, ba, nop;
7222+
7223+ err = get_user(sethi, (unsigned int *)regs->tpc);
7224+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7225+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7226+
7227+ if (err)
7228+ break;
7229+
7230+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7231+ (ba & 0xFFF00000U) == 0x30600000U &&
7232+ nop == 0x01000000U)
7233+ {
7234+ unsigned long addr;
7235+
7236+ addr = (sethi & 0x003FFFFFU) << 10;
7237+ regs->u_regs[UREG_G1] = addr;
7238+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7239+
7240+ if (test_thread_flag(TIF_32BIT))
7241+ addr &= 0xFFFFFFFFUL;
7242+
7243+ regs->tpc = addr;
7244+ regs->tnpc = addr+4;
7245+ return 2;
7246+ }
7247+ } while (0);
7248+
7249+#endif
7250+
7251+ return 1;
7252+}
7253+
7254+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7255+{
7256+ unsigned long i;
7257+
7258+ printk(KERN_ERR "PAX: bytes at PC: ");
7259+ for (i = 0; i < 8; i++) {
7260+ unsigned int c;
7261+ if (get_user(c, (unsigned int *)pc+i))
7262+ printk(KERN_CONT "???????? ");
7263+ else
7264+ printk(KERN_CONT "%08x ", c);
7265+ }
7266+ printk("\n");
7267+}
7268+#endif
7269+
7270 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7271 {
7272 struct mm_struct *mm = current->mm;
7273@@ -343,6 +797,29 @@ retry:
7274 if (!vma)
7275 goto bad_area;
7276
7277+#ifdef CONFIG_PAX_PAGEEXEC
7278+ /* PaX: detect ITLB misses on non-exec pages */
7279+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
7280+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
7281+ {
7282+ if (address != regs->tpc)
7283+ goto good_area;
7284+
7285+ up_read(&mm->mmap_sem);
7286+ switch (pax_handle_fetch_fault(regs)) {
7287+
7288+#ifdef CONFIG_PAX_EMUPLT
7289+ case 2:
7290+ case 3:
7291+ return;
7292+#endif
7293+
7294+ }
7295+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
7296+ do_group_exit(SIGKILL);
7297+ }
7298+#endif
7299+
7300 /* Pure DTLB misses do not tell us whether the fault causing
7301 * load/store/atomic was a write or not, it only says that there
7302 * was no match. So in such a case we (carefully) read the
7303diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
7304index 07e1453..0a7d9e9 100644
7305--- a/arch/sparc/mm/hugetlbpage.c
7306+++ b/arch/sparc/mm/hugetlbpage.c
7307@@ -67,7 +67,7 @@ full_search:
7308 }
7309 return -ENOMEM;
7310 }
7311- if (likely(!vma || addr + len <= vma->vm_start)) {
7312+ if (likely(check_heap_stack_gap(vma, addr, len))) {
7313 /*
7314 * Remember the place where we stopped the search:
7315 */
7316@@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7317 /* make sure it can fit in the remaining address space */
7318 if (likely(addr > len)) {
7319 vma = find_vma(mm, addr-len);
7320- if (!vma || addr <= vma->vm_start) {
7321+ if (check_heap_stack_gap(vma, addr - len, len)) {
7322 /* remember the address as a hint for next time */
7323 return (mm->free_area_cache = addr-len);
7324 }
7325@@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7326 if (unlikely(mm->mmap_base < len))
7327 goto bottomup;
7328
7329- addr = (mm->mmap_base-len) & HPAGE_MASK;
7330+ addr = mm->mmap_base - len;
7331
7332 do {
7333+ addr &= HPAGE_MASK;
7334 /*
7335 * Lookup failure means no vma is above this address,
7336 * else if new region fits below vma->vm_start,
7337 * return with success:
7338 */
7339 vma = find_vma(mm, addr);
7340- if (likely(!vma || addr+len <= vma->vm_start)) {
7341+ if (likely(check_heap_stack_gap(vma, addr, len))) {
7342 /* remember the address as a hint for next time */
7343 return (mm->free_area_cache = addr);
7344 }
7345@@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7346 mm->cached_hole_size = vma->vm_start - addr;
7347
7348 /* try just below the current vma->vm_start */
7349- addr = (vma->vm_start-len) & HPAGE_MASK;
7350- } while (likely(len < vma->vm_start));
7351+ addr = skip_heap_stack_gap(vma, len);
7352+ } while (!IS_ERR_VALUE(addr));
7353
7354 bottomup:
7355 /*
7356@@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7357 if (addr) {
7358 addr = ALIGN(addr, HPAGE_SIZE);
7359 vma = find_vma(mm, addr);
7360- if (task_size - len >= addr &&
7361- (!vma || addr + len <= vma->vm_start))
7362+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
7363 return addr;
7364 }
7365 if (mm->get_unmapped_area == arch_get_unmapped_area)
7366diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
7367index c5f9021..7591bae 100644
7368--- a/arch/sparc/mm/init_32.c
7369+++ b/arch/sparc/mm/init_32.c
7370@@ -315,6 +315,9 @@ extern void device_scan(void);
7371 pgprot_t PAGE_SHARED __read_mostly;
7372 EXPORT_SYMBOL(PAGE_SHARED);
7373
7374+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
7375+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
7376+
7377 void __init paging_init(void)
7378 {
7379 switch(sparc_cpu_model) {
7380@@ -343,17 +346,17 @@ void __init paging_init(void)
7381
7382 /* Initialize the protection map with non-constant, MMU dependent values. */
7383 protection_map[0] = PAGE_NONE;
7384- protection_map[1] = PAGE_READONLY;
7385- protection_map[2] = PAGE_COPY;
7386- protection_map[3] = PAGE_COPY;
7387+ protection_map[1] = PAGE_READONLY_NOEXEC;
7388+ protection_map[2] = PAGE_COPY_NOEXEC;
7389+ protection_map[3] = PAGE_COPY_NOEXEC;
7390 protection_map[4] = PAGE_READONLY;
7391 protection_map[5] = PAGE_READONLY;
7392 protection_map[6] = PAGE_COPY;
7393 protection_map[7] = PAGE_COPY;
7394 protection_map[8] = PAGE_NONE;
7395- protection_map[9] = PAGE_READONLY;
7396- protection_map[10] = PAGE_SHARED;
7397- protection_map[11] = PAGE_SHARED;
7398+ protection_map[9] = PAGE_READONLY_NOEXEC;
7399+ protection_map[10] = PAGE_SHARED_NOEXEC;
7400+ protection_map[11] = PAGE_SHARED_NOEXEC;
7401 protection_map[12] = PAGE_READONLY;
7402 protection_map[13] = PAGE_READONLY;
7403 protection_map[14] = PAGE_SHARED;
7404diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
7405index cbef74e..c38fead 100644
7406--- a/arch/sparc/mm/srmmu.c
7407+++ b/arch/sparc/mm/srmmu.c
7408@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
7409 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
7410 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
7411 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
7412+
7413+#ifdef CONFIG_PAX_PAGEEXEC
7414+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
7415+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
7416+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
7417+#endif
7418+
7419 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
7420 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
7421
7422diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
7423index f4500c6..889656c 100644
7424--- a/arch/tile/include/asm/atomic_64.h
7425+++ b/arch/tile/include/asm/atomic_64.h
7426@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7427
7428 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7429
7430+#define atomic64_read_unchecked(v) atomic64_read(v)
7431+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7432+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7433+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7434+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7435+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7436+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7437+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7438+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7439+
7440 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
7441 #define smp_mb__before_atomic_dec() smp_mb()
7442 #define smp_mb__after_atomic_dec() smp_mb()
7443diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
7444index 392e533..536b092 100644
7445--- a/arch/tile/include/asm/cache.h
7446+++ b/arch/tile/include/asm/cache.h
7447@@ -15,11 +15,12 @@
7448 #ifndef _ASM_TILE_CACHE_H
7449 #define _ASM_TILE_CACHE_H
7450
7451+#include <linux/const.h>
7452 #include <arch/chip.h>
7453
7454 /* bytes per L1 data cache line */
7455 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
7456-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7457+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7458
7459 /* bytes per L2 cache line */
7460 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
7461diff --git a/arch/um/Makefile b/arch/um/Makefile
7462index 55c0661..86ad413 100644
7463--- a/arch/um/Makefile
7464+++ b/arch/um/Makefile
7465@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
7466 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
7467 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
7468
7469+ifdef CONSTIFY_PLUGIN
7470+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7471+endif
7472+
7473 #This will adjust *FLAGS accordingly to the platform.
7474 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
7475
7476diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
7477index 19e1bdd..3665b77 100644
7478--- a/arch/um/include/asm/cache.h
7479+++ b/arch/um/include/asm/cache.h
7480@@ -1,6 +1,7 @@
7481 #ifndef __UM_CACHE_H
7482 #define __UM_CACHE_H
7483
7484+#include <linux/const.h>
7485
7486 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7487 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7488@@ -12,6 +13,6 @@
7489 # define L1_CACHE_SHIFT 5
7490 #endif
7491
7492-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7493+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7494
7495 #endif
7496diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
7497index 6c03acd..a5e0215 100644
7498--- a/arch/um/include/asm/kmap_types.h
7499+++ b/arch/um/include/asm/kmap_types.h
7500@@ -23,6 +23,7 @@ enum km_type {
7501 KM_IRQ1,
7502 KM_SOFTIRQ0,
7503 KM_SOFTIRQ1,
7504+ KM_CLEARPAGE,
7505 KM_TYPE_NR
7506 };
7507
7508diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
7509index 7cfc3ce..cbd1a58 100644
7510--- a/arch/um/include/asm/page.h
7511+++ b/arch/um/include/asm/page.h
7512@@ -14,6 +14,9 @@
7513 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
7514 #define PAGE_MASK (~(PAGE_SIZE-1))
7515
7516+#define ktla_ktva(addr) (addr)
7517+#define ktva_ktla(addr) (addr)
7518+
7519 #ifndef __ASSEMBLY__
7520
7521 struct page;
7522diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
7523index 0032f92..cd151e0 100644
7524--- a/arch/um/include/asm/pgtable-3level.h
7525+++ b/arch/um/include/asm/pgtable-3level.h
7526@@ -58,6 +58,7 @@
7527 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
7528 #define pud_populate(mm, pud, pmd) \
7529 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
7530+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7531
7532 #ifdef CONFIG_64BIT
7533 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
7534diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
7535index 2b73ded..804f540 100644
7536--- a/arch/um/kernel/process.c
7537+++ b/arch/um/kernel/process.c
7538@@ -404,22 +404,6 @@ int singlestepping(void * t)
7539 return 2;
7540 }
7541
7542-/*
7543- * Only x86 and x86_64 have an arch_align_stack().
7544- * All other arches have "#define arch_align_stack(x) (x)"
7545- * in their asm/system.h
7546- * As this is included in UML from asm-um/system-generic.h,
7547- * we can use it to behave as the subarch does.
7548- */
7549-#ifndef arch_align_stack
7550-unsigned long arch_align_stack(unsigned long sp)
7551-{
7552- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7553- sp -= get_random_int() % 8192;
7554- return sp & ~0xf;
7555-}
7556-#endif
7557-
7558 unsigned long get_wchan(struct task_struct *p)
7559 {
7560 unsigned long stack_page, sp, ip;
7561diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7562index ad8f795..2c7eec6 100644
7563--- a/arch/unicore32/include/asm/cache.h
7564+++ b/arch/unicore32/include/asm/cache.h
7565@@ -12,8 +12,10 @@
7566 #ifndef __UNICORE_CACHE_H__
7567 #define __UNICORE_CACHE_H__
7568
7569-#define L1_CACHE_SHIFT (5)
7570-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7571+#include <linux/const.h>
7572+
7573+#define L1_CACHE_SHIFT 5
7574+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7575
7576 /*
7577 * Memory returned by kmalloc() may be used for DMA, so we must make
7578diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7579index c9866b0..fe53aef 100644
7580--- a/arch/x86/Kconfig
7581+++ b/arch/x86/Kconfig
7582@@ -229,7 +229,7 @@ config X86_HT
7583
7584 config X86_32_LAZY_GS
7585 def_bool y
7586- depends on X86_32 && !CC_STACKPROTECTOR
7587+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7588
7589 config ARCH_HWEIGHT_CFLAGS
7590 string
7591@@ -1042,7 +1042,7 @@ choice
7592
7593 config NOHIGHMEM
7594 bool "off"
7595- depends on !X86_NUMAQ
7596+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7597 ---help---
7598 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7599 However, the address space of 32-bit x86 processors is only 4
7600@@ -1079,7 +1079,7 @@ config NOHIGHMEM
7601
7602 config HIGHMEM4G
7603 bool "4GB"
7604- depends on !X86_NUMAQ
7605+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7606 ---help---
7607 Select this if you have a 32-bit processor and between 1 and 4
7608 gigabytes of physical RAM.
7609@@ -1133,7 +1133,7 @@ config PAGE_OFFSET
7610 hex
7611 default 0xB0000000 if VMSPLIT_3G_OPT
7612 default 0x80000000 if VMSPLIT_2G
7613- default 0x78000000 if VMSPLIT_2G_OPT
7614+ default 0x70000000 if VMSPLIT_2G_OPT
7615 default 0x40000000 if VMSPLIT_1G
7616 default 0xC0000000
7617 depends on X86_32
7618@@ -1523,6 +1523,7 @@ config SECCOMP
7619
7620 config CC_STACKPROTECTOR
7621 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7622+ depends on X86_64 || !PAX_MEMORY_UDEREF
7623 ---help---
7624 This option turns on the -fstack-protector GCC feature. This
7625 feature puts, at the beginning of functions, a canary value on
7626@@ -1580,6 +1581,7 @@ config KEXEC_JUMP
7627 config PHYSICAL_START
7628 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7629 default "0x1000000"
7630+ range 0x400000 0x40000000
7631 ---help---
7632 This gives the physical address where the kernel is loaded.
7633
7634@@ -1643,6 +1645,7 @@ config X86_NEED_RELOCS
7635 config PHYSICAL_ALIGN
7636 hex "Alignment value to which kernel should be aligned" if X86_32
7637 default "0x1000000"
7638+ range 0x400000 0x1000000 if PAX_KERNEXEC
7639 range 0x2000 0x1000000
7640 ---help---
7641 This value puts the alignment restrictions on physical address
7642@@ -1674,9 +1677,10 @@ config HOTPLUG_CPU
7643 Say N if you want to disable CPU hotplug.
7644
7645 config COMPAT_VDSO
7646- def_bool y
7647+ def_bool n
7648 prompt "Compat VDSO support"
7649 depends on X86_32 || IA32_EMULATION
7650+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7651 ---help---
7652 Map the 32-bit VDSO to the predictable old-style address too.
7653
7654diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7655index 706e12e..62e4feb 100644
7656--- a/arch/x86/Kconfig.cpu
7657+++ b/arch/x86/Kconfig.cpu
7658@@ -334,7 +334,7 @@ config X86_PPRO_FENCE
7659
7660 config X86_F00F_BUG
7661 def_bool y
7662- depends on M586MMX || M586TSC || M586 || M486 || M386
7663+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7664
7665 config X86_INVD_BUG
7666 def_bool y
7667@@ -358,7 +358,7 @@ config X86_POPAD_OK
7668
7669 config X86_ALIGNMENT_16
7670 def_bool y
7671- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7672+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7673
7674 config X86_INTEL_USERCOPY
7675 def_bool y
7676@@ -404,7 +404,7 @@ config X86_CMPXCHG64
7677 # generates cmov.
7678 config X86_CMOV
7679 def_bool y
7680- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7681+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7682
7683 config X86_MINIMUM_CPU_FAMILY
7684 int
7685diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7686index e46c214..7c72b55 100644
7687--- a/arch/x86/Kconfig.debug
7688+++ b/arch/x86/Kconfig.debug
7689@@ -84,7 +84,7 @@ config X86_PTDUMP
7690 config DEBUG_RODATA
7691 bool "Write protect kernel read-only data structures"
7692 default y
7693- depends on DEBUG_KERNEL
7694+ depends on DEBUG_KERNEL && BROKEN
7695 ---help---
7696 Mark the kernel read-only data as write-protected in the pagetables,
7697 in order to catch accidental (and incorrect) writes to such const
7698@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
7699
7700 config DEBUG_SET_MODULE_RONX
7701 bool "Set loadable kernel module data as NX and text as RO"
7702- depends on MODULES
7703+ depends on MODULES && BROKEN
7704 ---help---
7705 This option helps catch unintended modifications to loadable
7706 kernel module's text and read-only data. It also prevents execution
7707diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7708index b1c611e..2c1a823 100644
7709--- a/arch/x86/Makefile
7710+++ b/arch/x86/Makefile
7711@@ -46,6 +46,7 @@ else
7712 UTS_MACHINE := x86_64
7713 CHECKFLAGS += -D__x86_64__ -m64
7714
7715+ biarch := $(call cc-option,-m64)
7716 KBUILD_AFLAGS += -m64
7717 KBUILD_CFLAGS += -m64
7718
7719@@ -222,3 +223,12 @@ define archhelp
7720 echo ' FDARGS="..." arguments for the booted kernel'
7721 echo ' FDINITRD=file initrd for the booted kernel'
7722 endef
7723+
7724+define OLD_LD
7725+
7726+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7727+*** Please upgrade your binutils to 2.18 or newer
7728+endef
7729+
7730+archprepare:
7731+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7732diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7733index 5a747dd..ff7b12c 100644
7734--- a/arch/x86/boot/Makefile
7735+++ b/arch/x86/boot/Makefile
7736@@ -64,6 +64,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7737 $(call cc-option, -fno-stack-protector) \
7738 $(call cc-option, -mpreferred-stack-boundary=2)
7739 KBUILD_CFLAGS += $(call cc-option, -m32)
7740+ifdef CONSTIFY_PLUGIN
7741+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7742+endif
7743 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7744 GCOV_PROFILE := n
7745
7746diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7747index 878e4b9..20537ab 100644
7748--- a/arch/x86/boot/bitops.h
7749+++ b/arch/x86/boot/bitops.h
7750@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7751 u8 v;
7752 const u32 *p = (const u32 *)addr;
7753
7754- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7755+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7756 return v;
7757 }
7758
7759@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7760
7761 static inline void set_bit(int nr, void *addr)
7762 {
7763- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7764+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7765 }
7766
7767 #endif /* BOOT_BITOPS_H */
7768diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7769index 18997e5..83d9c67 100644
7770--- a/arch/x86/boot/boot.h
7771+++ b/arch/x86/boot/boot.h
7772@@ -85,7 +85,7 @@ static inline void io_delay(void)
7773 static inline u16 ds(void)
7774 {
7775 u16 seg;
7776- asm("movw %%ds,%0" : "=rm" (seg));
7777+ asm volatile("movw %%ds,%0" : "=rm" (seg));
7778 return seg;
7779 }
7780
7781@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7782 static inline int memcmp(const void *s1, const void *s2, size_t len)
7783 {
7784 u8 diff;
7785- asm("repe; cmpsb; setnz %0"
7786+ asm volatile("repe; cmpsb; setnz %0"
7787 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7788 return diff;
7789 }
7790diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7791index e398bb5..3a382ca 100644
7792--- a/arch/x86/boot/compressed/Makefile
7793+++ b/arch/x86/boot/compressed/Makefile
7794@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7795 KBUILD_CFLAGS += $(cflags-y)
7796 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7797 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7798+ifdef CONSTIFY_PLUGIN
7799+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7800+endif
7801
7802 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7803 GCOV_PROFILE := n
7804diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
7805index 0cdfc0d..6e79437 100644
7806--- a/arch/x86/boot/compressed/eboot.c
7807+++ b/arch/x86/boot/compressed/eboot.c
7808@@ -122,7 +122,6 @@ again:
7809 *addr = max_addr;
7810 }
7811
7812-free_pool:
7813 efi_call_phys1(sys_table->boottime->free_pool, map);
7814
7815 fail:
7816@@ -186,7 +185,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
7817 if (i == map_size / desc_size)
7818 status = EFI_NOT_FOUND;
7819
7820-free_pool:
7821 efi_call_phys1(sys_table->boottime->free_pool, map);
7822 fail:
7823 return status;
7824diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7825index c85e3ac..6f5aa80 100644
7826--- a/arch/x86/boot/compressed/head_32.S
7827+++ b/arch/x86/boot/compressed/head_32.S
7828@@ -106,7 +106,7 @@ preferred_addr:
7829 notl %eax
7830 andl %eax, %ebx
7831 #else
7832- movl $LOAD_PHYSICAL_ADDR, %ebx
7833+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7834 #endif
7835
7836 /* Target address to relocate to for decompression */
7837@@ -192,7 +192,7 @@ relocated:
7838 * and where it was actually loaded.
7839 */
7840 movl %ebp, %ebx
7841- subl $LOAD_PHYSICAL_ADDR, %ebx
7842+ subl $____LOAD_PHYSICAL_ADDR, %ebx
7843 jz 2f /* Nothing to be done if loaded at compiled addr. */
7844 /*
7845 * Process relocations.
7846@@ -200,8 +200,7 @@ relocated:
7847
7848 1: subl $4, %edi
7849 movl (%edi), %ecx
7850- testl %ecx, %ecx
7851- jz 2f
7852+ jecxz 2f
7853 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7854 jmp 1b
7855 2:
7856diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7857index 87e03a1..0d94c76 100644
7858--- a/arch/x86/boot/compressed/head_64.S
7859+++ b/arch/x86/boot/compressed/head_64.S
7860@@ -91,7 +91,7 @@ ENTRY(startup_32)
7861 notl %eax
7862 andl %eax, %ebx
7863 #else
7864- movl $LOAD_PHYSICAL_ADDR, %ebx
7865+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7866 #endif
7867
7868 /* Target address to relocate to for decompression */
7869@@ -263,7 +263,7 @@ preferred_addr:
7870 notq %rax
7871 andq %rax, %rbp
7872 #else
7873- movq $LOAD_PHYSICAL_ADDR, %rbp
7874+ movq $____LOAD_PHYSICAL_ADDR, %rbp
7875 #endif
7876
7877 /* Target address to relocate to for decompression */
7878diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7879index 7116dcb..d9ae1d7 100644
7880--- a/arch/x86/boot/compressed/misc.c
7881+++ b/arch/x86/boot/compressed/misc.c
7882@@ -310,7 +310,7 @@ static void parse_elf(void *output)
7883 case PT_LOAD:
7884 #ifdef CONFIG_RELOCATABLE
7885 dest = output;
7886- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7887+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7888 #else
7889 dest = (void *)(phdr->p_paddr);
7890 #endif
7891@@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7892 error("Destination address too large");
7893 #endif
7894 #ifndef CONFIG_RELOCATABLE
7895- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7896+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7897 error("Wrong destination address");
7898 #endif
7899
7900diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7901index 4d3ff03..e4972ff 100644
7902--- a/arch/x86/boot/cpucheck.c
7903+++ b/arch/x86/boot/cpucheck.c
7904@@ -74,7 +74,7 @@ static int has_fpu(void)
7905 u16 fcw = -1, fsw = -1;
7906 u32 cr0;
7907
7908- asm("movl %%cr0,%0" : "=r" (cr0));
7909+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
7910 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7911 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7912 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7913@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7914 {
7915 u32 f0, f1;
7916
7917- asm("pushfl ; "
7918+ asm volatile("pushfl ; "
7919 "pushfl ; "
7920 "popl %0 ; "
7921 "movl %0,%1 ; "
7922@@ -115,7 +115,7 @@ static void get_flags(void)
7923 set_bit(X86_FEATURE_FPU, cpu.flags);
7924
7925 if (has_eflag(X86_EFLAGS_ID)) {
7926- asm("cpuid"
7927+ asm volatile("cpuid"
7928 : "=a" (max_intel_level),
7929 "=b" (cpu_vendor[0]),
7930 "=d" (cpu_vendor[1]),
7931@@ -124,7 +124,7 @@ static void get_flags(void)
7932
7933 if (max_intel_level >= 0x00000001 &&
7934 max_intel_level <= 0x0000ffff) {
7935- asm("cpuid"
7936+ asm volatile("cpuid"
7937 : "=a" (tfms),
7938 "=c" (cpu.flags[4]),
7939 "=d" (cpu.flags[0])
7940@@ -136,7 +136,7 @@ static void get_flags(void)
7941 cpu.model += ((tfms >> 16) & 0xf) << 4;
7942 }
7943
7944- asm("cpuid"
7945+ asm volatile("cpuid"
7946 : "=a" (max_amd_level)
7947 : "a" (0x80000000)
7948 : "ebx", "ecx", "edx");
7949@@ -144,7 +144,7 @@ static void get_flags(void)
7950 if (max_amd_level >= 0x80000001 &&
7951 max_amd_level <= 0x8000ffff) {
7952 u32 eax = 0x80000001;
7953- asm("cpuid"
7954+ asm volatile("cpuid"
7955 : "+a" (eax),
7956 "=c" (cpu.flags[6]),
7957 "=d" (cpu.flags[1])
7958@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7959 u32 ecx = MSR_K7_HWCR;
7960 u32 eax, edx;
7961
7962- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7963+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7964 eax &= ~(1 << 15);
7965- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7966+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7967
7968 get_flags(); /* Make sure it really did something */
7969 err = check_flags();
7970@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7971 u32 ecx = MSR_VIA_FCR;
7972 u32 eax, edx;
7973
7974- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7975+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7976 eax |= (1<<1)|(1<<7);
7977- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7978+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7979
7980 set_bit(X86_FEATURE_CX8, cpu.flags);
7981 err = check_flags();
7982@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7983 u32 eax, edx;
7984 u32 level = 1;
7985
7986- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7987- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7988- asm("cpuid"
7989+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7990+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7991+ asm volatile("cpuid"
7992 : "+a" (level), "=d" (cpu.flags[0])
7993 : : "ecx", "ebx");
7994- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7995+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7996
7997 err = check_flags();
7998 }
7999diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
8000index f1bbeeb..aff09cb 100644
8001--- a/arch/x86/boot/header.S
8002+++ b/arch/x86/boot/header.S
8003@@ -372,7 +372,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
8004 # single linked list of
8005 # struct setup_data
8006
8007-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
8008+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
8009
8010 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
8011 #define VO_INIT_SIZE (VO__end - VO__text)
8012diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
8013index db75d07..8e6d0af 100644
8014--- a/arch/x86/boot/memory.c
8015+++ b/arch/x86/boot/memory.c
8016@@ -19,7 +19,7 @@
8017
8018 static int detect_memory_e820(void)
8019 {
8020- int count = 0;
8021+ unsigned int count = 0;
8022 struct biosregs ireg, oreg;
8023 struct e820entry *desc = boot_params.e820_map;
8024 static struct e820entry buf; /* static so it is zeroed */
8025diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
8026index 11e8c6e..fdbb1ed 100644
8027--- a/arch/x86/boot/video-vesa.c
8028+++ b/arch/x86/boot/video-vesa.c
8029@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
8030
8031 boot_params.screen_info.vesapm_seg = oreg.es;
8032 boot_params.screen_info.vesapm_off = oreg.di;
8033+ boot_params.screen_info.vesapm_size = oreg.cx;
8034 }
8035
8036 /*
8037diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
8038index 43eda28..5ab5fdb 100644
8039--- a/arch/x86/boot/video.c
8040+++ b/arch/x86/boot/video.c
8041@@ -96,7 +96,7 @@ static void store_mode_params(void)
8042 static unsigned int get_entry(void)
8043 {
8044 char entry_buf[4];
8045- int i, len = 0;
8046+ unsigned int i, len = 0;
8047 int key;
8048 unsigned int v;
8049
8050diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
8051index 5b577d5..3c1fed4 100644
8052--- a/arch/x86/crypto/aes-x86_64-asm_64.S
8053+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
8054@@ -8,6 +8,8 @@
8055 * including this sentence is retained in full.
8056 */
8057
8058+#include <asm/alternative-asm.h>
8059+
8060 .extern crypto_ft_tab
8061 .extern crypto_it_tab
8062 .extern crypto_fl_tab
8063@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
8064 je B192; \
8065 leaq 32(r9),r9;
8066
8067+#define ret pax_force_retaddr 0, 1; ret
8068+
8069 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
8070 movq r1,r2; \
8071 movq r3,r4; \
8072diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
8073index 3470624..201259d 100644
8074--- a/arch/x86/crypto/aesni-intel_asm.S
8075+++ b/arch/x86/crypto/aesni-intel_asm.S
8076@@ -31,6 +31,7 @@
8077
8078 #include <linux/linkage.h>
8079 #include <asm/inst.h>
8080+#include <asm/alternative-asm.h>
8081
8082 #ifdef __x86_64__
8083 .data
8084@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
8085 pop %r14
8086 pop %r13
8087 pop %r12
8088+ pax_force_retaddr 0, 1
8089 ret
8090+ENDPROC(aesni_gcm_dec)
8091
8092
8093 /*****************************************************************************
8094@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
8095 pop %r14
8096 pop %r13
8097 pop %r12
8098+ pax_force_retaddr 0, 1
8099 ret
8100+ENDPROC(aesni_gcm_enc)
8101
8102 #endif
8103
8104@@ -1714,6 +1719,7 @@ _key_expansion_256a:
8105 pxor %xmm1, %xmm0
8106 movaps %xmm0, (TKEYP)
8107 add $0x10, TKEYP
8108+ pax_force_retaddr_bts
8109 ret
8110
8111 .align 4
8112@@ -1738,6 +1744,7 @@ _key_expansion_192a:
8113 shufps $0b01001110, %xmm2, %xmm1
8114 movaps %xmm1, 0x10(TKEYP)
8115 add $0x20, TKEYP
8116+ pax_force_retaddr_bts
8117 ret
8118
8119 .align 4
8120@@ -1757,6 +1764,7 @@ _key_expansion_192b:
8121
8122 movaps %xmm0, (TKEYP)
8123 add $0x10, TKEYP
8124+ pax_force_retaddr_bts
8125 ret
8126
8127 .align 4
8128@@ -1769,6 +1777,7 @@ _key_expansion_256b:
8129 pxor %xmm1, %xmm2
8130 movaps %xmm2, (TKEYP)
8131 add $0x10, TKEYP
8132+ pax_force_retaddr_bts
8133 ret
8134
8135 /*
8136@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
8137 #ifndef __x86_64__
8138 popl KEYP
8139 #endif
8140+ pax_force_retaddr 0, 1
8141 ret
8142+ENDPROC(aesni_set_key)
8143
8144 /*
8145 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
8146@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
8147 popl KLEN
8148 popl KEYP
8149 #endif
8150+ pax_force_retaddr 0, 1
8151 ret
8152+ENDPROC(aesni_enc)
8153
8154 /*
8155 * _aesni_enc1: internal ABI
8156@@ -1959,6 +1972,7 @@ _aesni_enc1:
8157 AESENC KEY STATE
8158 movaps 0x70(TKEYP), KEY
8159 AESENCLAST KEY STATE
8160+ pax_force_retaddr_bts
8161 ret
8162
8163 /*
8164@@ -2067,6 +2081,7 @@ _aesni_enc4:
8165 AESENCLAST KEY STATE2
8166 AESENCLAST KEY STATE3
8167 AESENCLAST KEY STATE4
8168+ pax_force_retaddr_bts
8169 ret
8170
8171 /*
8172@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
8173 popl KLEN
8174 popl KEYP
8175 #endif
8176+ pax_force_retaddr 0, 1
8177 ret
8178+ENDPROC(aesni_dec)
8179
8180 /*
8181 * _aesni_dec1: internal ABI
8182@@ -2146,6 +2163,7 @@ _aesni_dec1:
8183 AESDEC KEY STATE
8184 movaps 0x70(TKEYP), KEY
8185 AESDECLAST KEY STATE
8186+ pax_force_retaddr_bts
8187 ret
8188
8189 /*
8190@@ -2254,6 +2272,7 @@ _aesni_dec4:
8191 AESDECLAST KEY STATE2
8192 AESDECLAST KEY STATE3
8193 AESDECLAST KEY STATE4
8194+ pax_force_retaddr_bts
8195 ret
8196
8197 /*
8198@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
8199 popl KEYP
8200 popl LEN
8201 #endif
8202+ pax_force_retaddr 0, 1
8203 ret
8204+ENDPROC(aesni_ecb_enc)
8205
8206 /*
8207 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8208@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
8209 popl KEYP
8210 popl LEN
8211 #endif
8212+ pax_force_retaddr 0, 1
8213 ret
8214+ENDPROC(aesni_ecb_dec)
8215
8216 /*
8217 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8218@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
8219 popl LEN
8220 popl IVP
8221 #endif
8222+ pax_force_retaddr 0, 1
8223 ret
8224+ENDPROC(aesni_cbc_enc)
8225
8226 /*
8227 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8228@@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
8229 popl LEN
8230 popl IVP
8231 #endif
8232+ pax_force_retaddr 0, 1
8233 ret
8234+ENDPROC(aesni_cbc_dec)
8235
8236 #ifdef __x86_64__
8237 .align 16
8238@@ -2526,6 +2553,7 @@ _aesni_inc_init:
8239 mov $1, TCTR_LOW
8240 MOVQ_R64_XMM TCTR_LOW INC
8241 MOVQ_R64_XMM CTR TCTR_LOW
8242+ pax_force_retaddr_bts
8243 ret
8244
8245 /*
8246@@ -2554,6 +2582,7 @@ _aesni_inc:
8247 .Linc_low:
8248 movaps CTR, IV
8249 PSHUFB_XMM BSWAP_MASK IV
8250+ pax_force_retaddr_bts
8251 ret
8252
8253 /*
8254@@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
8255 .Lctr_enc_ret:
8256 movups IV, (IVP)
8257 .Lctr_enc_just_ret:
8258+ pax_force_retaddr 0, 1
8259 ret
8260+ENDPROC(aesni_ctr_enc)
8261 #endif
8262diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8263index 391d245..67f35c2 100644
8264--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
8265+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8266@@ -20,6 +20,8 @@
8267 *
8268 */
8269
8270+#include <asm/alternative-asm.h>
8271+
8272 .file "blowfish-x86_64-asm.S"
8273 .text
8274
8275@@ -151,9 +153,11 @@ __blowfish_enc_blk:
8276 jnz __enc_xor;
8277
8278 write_block();
8279+ pax_force_retaddr 0, 1
8280 ret;
8281 __enc_xor:
8282 xor_block();
8283+ pax_force_retaddr 0, 1
8284 ret;
8285
8286 .align 8
8287@@ -188,6 +192,7 @@ blowfish_dec_blk:
8288
8289 movq %r11, %rbp;
8290
8291+ pax_force_retaddr 0, 1
8292 ret;
8293
8294 /**********************************************************************
8295@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
8296
8297 popq %rbx;
8298 popq %rbp;
8299+ pax_force_retaddr 0, 1
8300 ret;
8301
8302 __enc_xor4:
8303@@ -349,6 +355,7 @@ __enc_xor4:
8304
8305 popq %rbx;
8306 popq %rbp;
8307+ pax_force_retaddr 0, 1
8308 ret;
8309
8310 .align 8
8311@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
8312 popq %rbx;
8313 popq %rbp;
8314
8315+ pax_force_retaddr 0, 1
8316 ret;
8317
8318diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
8319index 0b33743..7a56206 100644
8320--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
8321+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
8322@@ -20,6 +20,8 @@
8323 *
8324 */
8325
8326+#include <asm/alternative-asm.h>
8327+
8328 .file "camellia-x86_64-asm_64.S"
8329 .text
8330
8331@@ -229,12 +231,14 @@ __enc_done:
8332 enc_outunpack(mov, RT1);
8333
8334 movq RRBP, %rbp;
8335+ pax_force_retaddr 0, 1
8336 ret;
8337
8338 __enc_xor:
8339 enc_outunpack(xor, RT1);
8340
8341 movq RRBP, %rbp;
8342+ pax_force_retaddr 0, 1
8343 ret;
8344
8345 .global camellia_dec_blk;
8346@@ -275,6 +279,7 @@ __dec_rounds16:
8347 dec_outunpack();
8348
8349 movq RRBP, %rbp;
8350+ pax_force_retaddr 0, 1
8351 ret;
8352
8353 /**********************************************************************
8354@@ -468,6 +473,7 @@ __enc2_done:
8355
8356 movq RRBP, %rbp;
8357 popq %rbx;
8358+ pax_force_retaddr 0, 1
8359 ret;
8360
8361 __enc2_xor:
8362@@ -475,6 +481,7 @@ __enc2_xor:
8363
8364 movq RRBP, %rbp;
8365 popq %rbx;
8366+ pax_force_retaddr 0, 1
8367 ret;
8368
8369 .global camellia_dec_blk_2way;
8370@@ -517,4 +524,5 @@ __dec2_rounds16:
8371
8372 movq RRBP, %rbp;
8373 movq RXOR, %rbx;
8374+ pax_force_retaddr 0, 1
8375 ret;
8376diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8377index 6214a9b..1f4fc9a 100644
8378--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
8379+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8380@@ -1,3 +1,5 @@
8381+#include <asm/alternative-asm.h>
8382+
8383 # enter ECRYPT_encrypt_bytes
8384 .text
8385 .p2align 5
8386@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
8387 add %r11,%rsp
8388 mov %rdi,%rax
8389 mov %rsi,%rdx
8390+ pax_force_retaddr 0, 1
8391 ret
8392 # bytesatleast65:
8393 ._bytesatleast65:
8394@@ -891,6 +894,7 @@ ECRYPT_keysetup:
8395 add %r11,%rsp
8396 mov %rdi,%rax
8397 mov %rsi,%rdx
8398+ pax_force_retaddr
8399 ret
8400 # enter ECRYPT_ivsetup
8401 .text
8402@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
8403 add %r11,%rsp
8404 mov %rdi,%rax
8405 mov %rsi,%rdx
8406+ pax_force_retaddr
8407 ret
8408diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8409index 3ee1ff0..cbc568b 100644
8410--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8411+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8412@@ -24,6 +24,8 @@
8413 *
8414 */
8415
8416+#include <asm/alternative-asm.h>
8417+
8418 .file "serpent-sse2-x86_64-asm_64.S"
8419 .text
8420
8421@@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
8422 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8423 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8424
8425+ pax_force_retaddr
8426 ret;
8427
8428 __enc_xor8:
8429 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8430 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8431
8432+ pax_force_retaddr
8433 ret;
8434
8435 .align 8
8436@@ -755,4 +759,5 @@ serpent_dec_blk_8way:
8437 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
8438 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
8439
8440+ pax_force_retaddr
8441 ret;
8442diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
8443index b2c2f57..8470cab 100644
8444--- a/arch/x86/crypto/sha1_ssse3_asm.S
8445+++ b/arch/x86/crypto/sha1_ssse3_asm.S
8446@@ -28,6 +28,8 @@
8447 * (at your option) any later version.
8448 */
8449
8450+#include <asm/alternative-asm.h>
8451+
8452 #define CTX %rdi // arg1
8453 #define BUF %rsi // arg2
8454 #define CNT %rdx // arg3
8455@@ -104,6 +106,7 @@
8456 pop %r12
8457 pop %rbp
8458 pop %rbx
8459+ pax_force_retaddr 0, 1
8460 ret
8461
8462 .size \name, .-\name
8463diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8464index 5b012a2..36d5364 100644
8465--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8466+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8467@@ -20,6 +20,8 @@
8468 *
8469 */
8470
8471+#include <asm/alternative-asm.h>
8472+
8473 .file "twofish-x86_64-asm-3way.S"
8474 .text
8475
8476@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8477 popq %r13;
8478 popq %r14;
8479 popq %r15;
8480+ pax_force_retaddr 0, 1
8481 ret;
8482
8483 __enc_xor3:
8484@@ -271,6 +274,7 @@ __enc_xor3:
8485 popq %r13;
8486 popq %r14;
8487 popq %r15;
8488+ pax_force_retaddr 0, 1
8489 ret;
8490
8491 .global twofish_dec_blk_3way
8492@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8493 popq %r13;
8494 popq %r14;
8495 popq %r15;
8496+ pax_force_retaddr 0, 1
8497 ret;
8498
8499diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8500index 7bcf3fc..f53832f 100644
8501--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8502+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8503@@ -21,6 +21,7 @@
8504 .text
8505
8506 #include <asm/asm-offsets.h>
8507+#include <asm/alternative-asm.h>
8508
8509 #define a_offset 0
8510 #define b_offset 4
8511@@ -268,6 +269,7 @@ twofish_enc_blk:
8512
8513 popq R1
8514 movq $1,%rax
8515+ pax_force_retaddr 0, 1
8516 ret
8517
8518 twofish_dec_blk:
8519@@ -319,4 +321,5 @@ twofish_dec_blk:
8520
8521 popq R1
8522 movq $1,%rax
8523+ pax_force_retaddr 0, 1
8524 ret
8525diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8526index 07b3a68..bd2a388 100644
8527--- a/arch/x86/ia32/ia32_aout.c
8528+++ b/arch/x86/ia32/ia32_aout.c
8529@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8530 unsigned long dump_start, dump_size;
8531 struct user32 dump;
8532
8533+ memset(&dump, 0, sizeof(dump));
8534+
8535 fs = get_fs();
8536 set_fs(KERNEL_DS);
8537 has_dumped = 1;
8538diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8539index a69245b..6d145f4 100644
8540--- a/arch/x86/ia32/ia32_signal.c
8541+++ b/arch/x86/ia32/ia32_signal.c
8542@@ -168,7 +168,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8543 }
8544 seg = get_fs();
8545 set_fs(KERNEL_DS);
8546- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8547+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8548 set_fs(seg);
8549 if (ret >= 0 && uoss_ptr) {
8550 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8551@@ -369,7 +369,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8552 */
8553 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8554 size_t frame_size,
8555- void **fpstate)
8556+ void __user **fpstate)
8557 {
8558 unsigned long sp;
8559
8560@@ -390,7 +390,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8561
8562 if (used_math()) {
8563 sp = sp - sig_xstate_ia32_size;
8564- *fpstate = (struct _fpstate_ia32 *) sp;
8565+ *fpstate = (struct _fpstate_ia32 __user *) sp;
8566 if (save_i387_xstate_ia32(*fpstate) < 0)
8567 return (void __user *) -1L;
8568 }
8569@@ -398,7 +398,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8570 sp -= frame_size;
8571 /* Align the stack pointer according to the i386 ABI,
8572 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8573- sp = ((sp + 4) & -16ul) - 4;
8574+ sp = ((sp - 12) & -16ul) - 4;
8575 return (void __user *) sp;
8576 }
8577
8578@@ -456,7 +456,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8579 * These are actually not used anymore, but left because some
8580 * gdb versions depend on them as a marker.
8581 */
8582- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8583+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8584 } put_user_catch(err);
8585
8586 if (err)
8587@@ -498,7 +498,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8588 0xb8,
8589 __NR_ia32_rt_sigreturn,
8590 0x80cd,
8591- 0,
8592+ 0
8593 };
8594
8595 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8596@@ -528,16 +528,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8597
8598 if (ka->sa.sa_flags & SA_RESTORER)
8599 restorer = ka->sa.sa_restorer;
8600+ else if (current->mm->context.vdso)
8601+ /* Return stub is in 32bit vsyscall page */
8602+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8603 else
8604- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8605- rt_sigreturn);
8606+ restorer = &frame->retcode;
8607 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8608
8609 /*
8610 * Not actually used anymore, but left because some gdb
8611 * versions need it.
8612 */
8613- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8614+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8615 } put_user_catch(err);
8616
8617 if (err)
8618diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8619index e3e7340..05ed805 100644
8620--- a/arch/x86/ia32/ia32entry.S
8621+++ b/arch/x86/ia32/ia32entry.S
8622@@ -13,8 +13,10 @@
8623 #include <asm/thread_info.h>
8624 #include <asm/segment.h>
8625 #include <asm/irqflags.h>
8626+#include <asm/pgtable.h>
8627 #include <linux/linkage.h>
8628 #include <linux/err.h>
8629+#include <asm/alternative-asm.h>
8630
8631 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8632 #include <linux/elf-em.h>
8633@@ -94,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
8634 ENDPROC(native_irq_enable_sysexit)
8635 #endif
8636
8637+ .macro pax_enter_kernel_user
8638+ pax_set_fptr_mask
8639+#ifdef CONFIG_PAX_MEMORY_UDEREF
8640+ call pax_enter_kernel_user
8641+#endif
8642+ .endm
8643+
8644+ .macro pax_exit_kernel_user
8645+#ifdef CONFIG_PAX_MEMORY_UDEREF
8646+ call pax_exit_kernel_user
8647+#endif
8648+#ifdef CONFIG_PAX_RANDKSTACK
8649+ pushq %rax
8650+ pushq %r11
8651+ call pax_randomize_kstack
8652+ popq %r11
8653+ popq %rax
8654+#endif
8655+ .endm
8656+
8657+.macro pax_erase_kstack
8658+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8659+ call pax_erase_kstack
8660+#endif
8661+.endm
8662+
8663 /*
8664 * 32bit SYSENTER instruction entry.
8665 *
8666@@ -120,12 +148,6 @@ ENTRY(ia32_sysenter_target)
8667 CFI_REGISTER rsp,rbp
8668 SWAPGS_UNSAFE_STACK
8669 movq PER_CPU_VAR(kernel_stack), %rsp
8670- addq $(KERNEL_STACK_OFFSET),%rsp
8671- /*
8672- * No need to follow this irqs on/off section: the syscall
8673- * disabled irqs, here we enable it straight after entry:
8674- */
8675- ENABLE_INTERRUPTS(CLBR_NONE)
8676 movl %ebp,%ebp /* zero extension */
8677 pushq_cfi $__USER32_DS
8678 /*CFI_REL_OFFSET ss,0*/
8679@@ -133,24 +155,39 @@ ENTRY(ia32_sysenter_target)
8680 CFI_REL_OFFSET rsp,0
8681 pushfq_cfi
8682 /*CFI_REL_OFFSET rflags,0*/
8683- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
8684- CFI_REGISTER rip,r10
8685+ orl $X86_EFLAGS_IF,(%rsp)
8686+ GET_THREAD_INFO(%r11)
8687+ movl TI_sysenter_return(%r11), %r11d
8688+ CFI_REGISTER rip,r11
8689 pushq_cfi $__USER32_CS
8690 /*CFI_REL_OFFSET cs,0*/
8691 movl %eax, %eax
8692- pushq_cfi %r10
8693+ pushq_cfi %r11
8694 CFI_REL_OFFSET rip,0
8695 pushq_cfi %rax
8696 cld
8697 SAVE_ARGS 0,1,0
8698+ pax_enter_kernel_user
8699+ /*
8700+ * No need to follow this irqs on/off section: the syscall
8701+ * disabled irqs, here we enable it straight after entry:
8702+ */
8703+ ENABLE_INTERRUPTS(CLBR_NONE)
8704 /* no need to do an access_ok check here because rbp has been
8705 32bit zero extended */
8706+
8707+#ifdef CONFIG_PAX_MEMORY_UDEREF
8708+ mov $PAX_USER_SHADOW_BASE,%r11
8709+ add %r11,%rbp
8710+#endif
8711+
8712 1: movl (%rbp),%ebp
8713 .section __ex_table,"a"
8714 .quad 1b,ia32_badarg
8715 .previous
8716- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8717- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8718+ GET_THREAD_INFO(%r11)
8719+ orl $TS_COMPAT,TI_status(%r11)
8720+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8721 CFI_REMEMBER_STATE
8722 jnz sysenter_tracesys
8723 cmpq $(IA32_NR_syscalls-1),%rax
8724@@ -160,12 +197,15 @@ sysenter_do_call:
8725 sysenter_dispatch:
8726 call *ia32_sys_call_table(,%rax,8)
8727 movq %rax,RAX-ARGOFFSET(%rsp)
8728+ GET_THREAD_INFO(%r11)
8729 DISABLE_INTERRUPTS(CLBR_NONE)
8730 TRACE_IRQS_OFF
8731- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8732+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8733 jnz sysexit_audit
8734 sysexit_from_sys_call:
8735- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8736+ pax_exit_kernel_user
8737+ pax_erase_kstack
8738+ andl $~TS_COMPAT,TI_status(%r11)
8739 /* clear IF, that popfq doesn't enable interrupts early */
8740 andl $~0x200,EFLAGS-R11(%rsp)
8741 movl RIP-R11(%rsp),%edx /* User %eip */
8742@@ -191,6 +231,9 @@ sysexit_from_sys_call:
8743 movl %eax,%esi /* 2nd arg: syscall number */
8744 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8745 call __audit_syscall_entry
8746+
8747+ pax_erase_kstack
8748+
8749 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8750 cmpq $(IA32_NR_syscalls-1),%rax
8751 ja ia32_badsys
8752@@ -202,7 +245,7 @@ sysexit_from_sys_call:
8753 .endm
8754
8755 .macro auditsys_exit exit
8756- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8757+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8758 jnz ia32_ret_from_sys_call
8759 TRACE_IRQS_ON
8760 sti
8761@@ -213,11 +256,12 @@ sysexit_from_sys_call:
8762 1: setbe %al /* 1 if error, 0 if not */
8763 movzbl %al,%edi /* zero-extend that into %edi */
8764 call __audit_syscall_exit
8765+ GET_THREAD_INFO(%r11)
8766 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
8767 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8768 cli
8769 TRACE_IRQS_OFF
8770- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8771+ testl %edi,TI_flags(%r11)
8772 jz \exit
8773 CLEAR_RREGS -ARGOFFSET
8774 jmp int_with_check
8775@@ -235,7 +279,7 @@ sysexit_audit:
8776
8777 sysenter_tracesys:
8778 #ifdef CONFIG_AUDITSYSCALL
8779- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8780+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8781 jz sysenter_auditsys
8782 #endif
8783 SAVE_REST
8784@@ -243,6 +287,9 @@ sysenter_tracesys:
8785 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8786 movq %rsp,%rdi /* &pt_regs -> arg1 */
8787 call syscall_trace_enter
8788+
8789+ pax_erase_kstack
8790+
8791 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8792 RESTORE_REST
8793 cmpq $(IA32_NR_syscalls-1),%rax
8794@@ -274,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
8795 ENTRY(ia32_cstar_target)
8796 CFI_STARTPROC32 simple
8797 CFI_SIGNAL_FRAME
8798- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8799+ CFI_DEF_CFA rsp,0
8800 CFI_REGISTER rip,rcx
8801 /*CFI_REGISTER rflags,r11*/
8802 SWAPGS_UNSAFE_STACK
8803 movl %esp,%r8d
8804 CFI_REGISTER rsp,r8
8805 movq PER_CPU_VAR(kernel_stack),%rsp
8806+ SAVE_ARGS 8*6,0,0
8807+ pax_enter_kernel_user
8808 /*
8809 * No need to follow this irqs on/off section: the syscall
8810 * disabled irqs and here we enable it straight after entry:
8811 */
8812 ENABLE_INTERRUPTS(CLBR_NONE)
8813- SAVE_ARGS 8,0,0
8814 movl %eax,%eax /* zero extension */
8815 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8816 movq %rcx,RIP-ARGOFFSET(%rsp)
8817@@ -302,12 +350,19 @@ ENTRY(ia32_cstar_target)
8818 /* no need to do an access_ok check here because r8 has been
8819 32bit zero extended */
8820 /* hardware stack frame is complete now */
8821+
8822+#ifdef CONFIG_PAX_MEMORY_UDEREF
8823+ mov $PAX_USER_SHADOW_BASE,%r11
8824+ add %r11,%r8
8825+#endif
8826+
8827 1: movl (%r8),%r9d
8828 .section __ex_table,"a"
8829 .quad 1b,ia32_badarg
8830 .previous
8831- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8832- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8833+ GET_THREAD_INFO(%r11)
8834+ orl $TS_COMPAT,TI_status(%r11)
8835+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8836 CFI_REMEMBER_STATE
8837 jnz cstar_tracesys
8838 cmpq $IA32_NR_syscalls-1,%rax
8839@@ -317,12 +372,15 @@ cstar_do_call:
8840 cstar_dispatch:
8841 call *ia32_sys_call_table(,%rax,8)
8842 movq %rax,RAX-ARGOFFSET(%rsp)
8843+ GET_THREAD_INFO(%r11)
8844 DISABLE_INTERRUPTS(CLBR_NONE)
8845 TRACE_IRQS_OFF
8846- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8847+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8848 jnz sysretl_audit
8849 sysretl_from_sys_call:
8850- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8851+ pax_exit_kernel_user
8852+ pax_erase_kstack
8853+ andl $~TS_COMPAT,TI_status(%r11)
8854 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
8855 movl RIP-ARGOFFSET(%rsp),%ecx
8856 CFI_REGISTER rip,rcx
8857@@ -350,7 +408,7 @@ sysretl_audit:
8858
8859 cstar_tracesys:
8860 #ifdef CONFIG_AUDITSYSCALL
8861- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8862+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8863 jz cstar_auditsys
8864 #endif
8865 xchgl %r9d,%ebp
8866@@ -359,6 +417,9 @@ cstar_tracesys:
8867 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8868 movq %rsp,%rdi /* &pt_regs -> arg1 */
8869 call syscall_trace_enter
8870+
8871+ pax_erase_kstack
8872+
8873 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8874 RESTORE_REST
8875 xchgl %ebp,%r9d
8876@@ -404,19 +465,21 @@ ENTRY(ia32_syscall)
8877 CFI_REL_OFFSET rip,RIP-RIP
8878 PARAVIRT_ADJUST_EXCEPTION_FRAME
8879 SWAPGS
8880- /*
8881- * No need to follow this irqs on/off section: the syscall
8882- * disabled irqs and here we enable it straight after entry:
8883- */
8884- ENABLE_INTERRUPTS(CLBR_NONE)
8885 movl %eax,%eax
8886 pushq_cfi %rax
8887 cld
8888 /* note the registers are not zero extended to the sf.
8889 this could be a problem. */
8890 SAVE_ARGS 0,1,0
8891- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8892- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8893+ pax_enter_kernel_user
8894+ /*
8895+ * No need to follow this irqs on/off section: the syscall
8896+ * disabled irqs and here we enable it straight after entry:
8897+ */
8898+ ENABLE_INTERRUPTS(CLBR_NONE)
8899+ GET_THREAD_INFO(%r11)
8900+ orl $TS_COMPAT,TI_status(%r11)
8901+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8902 jnz ia32_tracesys
8903 cmpq $(IA32_NR_syscalls-1),%rax
8904 ja ia32_badsys
8905@@ -435,6 +498,9 @@ ia32_tracesys:
8906 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8907 movq %rsp,%rdi /* &pt_regs -> arg1 */
8908 call syscall_trace_enter
8909+
8910+ pax_erase_kstack
8911+
8912 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8913 RESTORE_REST
8914 cmpq $(IA32_NR_syscalls-1),%rax
8915diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8916index aec2202..f76174e 100644
8917--- a/arch/x86/ia32/sys_ia32.c
8918+++ b/arch/x86/ia32/sys_ia32.c
8919@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8920 */
8921 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8922 {
8923- typeof(ubuf->st_uid) uid = 0;
8924- typeof(ubuf->st_gid) gid = 0;
8925+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
8926+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
8927 SET_UID(uid, stat->uid);
8928 SET_GID(gid, stat->gid);
8929 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8930@@ -292,7 +292,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
8931 return alarm_setitimer(seconds);
8932 }
8933
8934-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
8935+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
8936 int options)
8937 {
8938 return compat_sys_wait4(pid, stat_addr, options, NULL);
8939@@ -313,7 +313,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8940 mm_segment_t old_fs = get_fs();
8941
8942 set_fs(KERNEL_DS);
8943- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8944+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8945 set_fs(old_fs);
8946 if (put_compat_timespec(&t, interval))
8947 return -EFAULT;
8948@@ -329,7 +329,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8949 mm_segment_t old_fs = get_fs();
8950
8951 set_fs(KERNEL_DS);
8952- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8953+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8954 set_fs(old_fs);
8955 if (!ret) {
8956 switch (_NSIG_WORDS) {
8957@@ -354,7 +354,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8958 if (copy_siginfo_from_user32(&info, uinfo))
8959 return -EFAULT;
8960 set_fs(KERNEL_DS);
8961- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8962+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8963 set_fs(old_fs);
8964 return ret;
8965 }
8966@@ -399,7 +399,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8967 return -EFAULT;
8968
8969 set_fs(KERNEL_DS);
8970- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8971+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8972 count);
8973 set_fs(old_fs);
8974
8975diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8976index 952bd01..7692c6f 100644
8977--- a/arch/x86/include/asm/alternative-asm.h
8978+++ b/arch/x86/include/asm/alternative-asm.h
8979@@ -15,6 +15,45 @@
8980 .endm
8981 #endif
8982
8983+#ifdef KERNEXEC_PLUGIN
8984+ .macro pax_force_retaddr_bts rip=0
8985+ btsq $63,\rip(%rsp)
8986+ .endm
8987+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8988+ .macro pax_force_retaddr rip=0, reload=0
8989+ btsq $63,\rip(%rsp)
8990+ .endm
8991+ .macro pax_force_fptr ptr
8992+ btsq $63,\ptr
8993+ .endm
8994+ .macro pax_set_fptr_mask
8995+ .endm
8996+#endif
8997+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8998+ .macro pax_force_retaddr rip=0, reload=0
8999+ .if \reload
9000+ pax_set_fptr_mask
9001+ .endif
9002+ orq %r10,\rip(%rsp)
9003+ .endm
9004+ .macro pax_force_fptr ptr
9005+ orq %r10,\ptr
9006+ .endm
9007+ .macro pax_set_fptr_mask
9008+ movabs $0x8000000000000000,%r10
9009+ .endm
9010+#endif
9011+#else
9012+ .macro pax_force_retaddr rip=0, reload=0
9013+ .endm
9014+ .macro pax_force_fptr ptr
9015+ .endm
9016+ .macro pax_force_retaddr_bts rip=0
9017+ .endm
9018+ .macro pax_set_fptr_mask
9019+ .endm
9020+#endif
9021+
9022 .macro altinstruction_entry orig alt feature orig_len alt_len
9023 .long \orig - .
9024 .long \alt - .
9025diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
9026index 49331be..9706065 100644
9027--- a/arch/x86/include/asm/alternative.h
9028+++ b/arch/x86/include/asm/alternative.h
9029@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
9030 ".section .discard,\"aw\",@progbits\n" \
9031 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
9032 ".previous\n" \
9033- ".section .altinstr_replacement, \"ax\"\n" \
9034+ ".section .altinstr_replacement, \"a\"\n" \
9035 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
9036 ".previous"
9037
9038diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
9039index d854101..f6ea947 100644
9040--- a/arch/x86/include/asm/apic.h
9041+++ b/arch/x86/include/asm/apic.h
9042@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
9043
9044 #ifdef CONFIG_X86_LOCAL_APIC
9045
9046-extern unsigned int apic_verbosity;
9047+extern int apic_verbosity;
9048 extern int local_apic_timer_c2_ok;
9049
9050 extern int disable_apic;
9051diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
9052index 20370c6..a2eb9b0 100644
9053--- a/arch/x86/include/asm/apm.h
9054+++ b/arch/x86/include/asm/apm.h
9055@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
9056 __asm__ __volatile__(APM_DO_ZERO_SEGS
9057 "pushl %%edi\n\t"
9058 "pushl %%ebp\n\t"
9059- "lcall *%%cs:apm_bios_entry\n\t"
9060+ "lcall *%%ss:apm_bios_entry\n\t"
9061 "setc %%al\n\t"
9062 "popl %%ebp\n\t"
9063 "popl %%edi\n\t"
9064@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
9065 __asm__ __volatile__(APM_DO_ZERO_SEGS
9066 "pushl %%edi\n\t"
9067 "pushl %%ebp\n\t"
9068- "lcall *%%cs:apm_bios_entry\n\t"
9069+ "lcall *%%ss:apm_bios_entry\n\t"
9070 "setc %%bl\n\t"
9071 "popl %%ebp\n\t"
9072 "popl %%edi\n\t"
9073diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
9074index 58cb6d4..ca9010d 100644
9075--- a/arch/x86/include/asm/atomic.h
9076+++ b/arch/x86/include/asm/atomic.h
9077@@ -22,7 +22,18 @@
9078 */
9079 static inline int atomic_read(const atomic_t *v)
9080 {
9081- return (*(volatile int *)&(v)->counter);
9082+ return (*(volatile const int *)&(v)->counter);
9083+}
9084+
9085+/**
9086+ * atomic_read_unchecked - read atomic variable
9087+ * @v: pointer of type atomic_unchecked_t
9088+ *
9089+ * Atomically reads the value of @v.
9090+ */
9091+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9092+{
9093+ return (*(volatile const int *)&(v)->counter);
9094 }
9095
9096 /**
9097@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
9098 }
9099
9100 /**
9101+ * atomic_set_unchecked - set atomic variable
9102+ * @v: pointer of type atomic_unchecked_t
9103+ * @i: required value
9104+ *
9105+ * Atomically sets the value of @v to @i.
9106+ */
9107+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9108+{
9109+ v->counter = i;
9110+}
9111+
9112+/**
9113 * atomic_add - add integer to atomic variable
9114 * @i: integer value to add
9115 * @v: pointer of type atomic_t
9116@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
9117 */
9118 static inline void atomic_add(int i, atomic_t *v)
9119 {
9120- asm volatile(LOCK_PREFIX "addl %1,%0"
9121+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9122+
9123+#ifdef CONFIG_PAX_REFCOUNT
9124+ "jno 0f\n"
9125+ LOCK_PREFIX "subl %1,%0\n"
9126+ "int $4\n0:\n"
9127+ _ASM_EXTABLE(0b, 0b)
9128+#endif
9129+
9130+ : "+m" (v->counter)
9131+ : "ir" (i));
9132+}
9133+
9134+/**
9135+ * atomic_add_unchecked - add integer to atomic variable
9136+ * @i: integer value to add
9137+ * @v: pointer of type atomic_unchecked_t
9138+ *
9139+ * Atomically adds @i to @v.
9140+ */
9141+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9142+{
9143+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9144 : "+m" (v->counter)
9145 : "ir" (i));
9146 }
9147@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
9148 */
9149 static inline void atomic_sub(int i, atomic_t *v)
9150 {
9151- asm volatile(LOCK_PREFIX "subl %1,%0"
9152+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9153+
9154+#ifdef CONFIG_PAX_REFCOUNT
9155+ "jno 0f\n"
9156+ LOCK_PREFIX "addl %1,%0\n"
9157+ "int $4\n0:\n"
9158+ _ASM_EXTABLE(0b, 0b)
9159+#endif
9160+
9161+ : "+m" (v->counter)
9162+ : "ir" (i));
9163+}
9164+
9165+/**
9166+ * atomic_sub_unchecked - subtract integer from atomic variable
9167+ * @i: integer value to subtract
9168+ * @v: pointer of type atomic_unchecked_t
9169+ *
9170+ * Atomically subtracts @i from @v.
9171+ */
9172+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9173+{
9174+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9175 : "+m" (v->counter)
9176 : "ir" (i));
9177 }
9178@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9179 {
9180 unsigned char c;
9181
9182- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9183+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
9184+
9185+#ifdef CONFIG_PAX_REFCOUNT
9186+ "jno 0f\n"
9187+ LOCK_PREFIX "addl %2,%0\n"
9188+ "int $4\n0:\n"
9189+ _ASM_EXTABLE(0b, 0b)
9190+#endif
9191+
9192+ "sete %1\n"
9193 : "+m" (v->counter), "=qm" (c)
9194 : "ir" (i) : "memory");
9195 return c;
9196@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9197 */
9198 static inline void atomic_inc(atomic_t *v)
9199 {
9200- asm volatile(LOCK_PREFIX "incl %0"
9201+ asm volatile(LOCK_PREFIX "incl %0\n"
9202+
9203+#ifdef CONFIG_PAX_REFCOUNT
9204+ "jno 0f\n"
9205+ LOCK_PREFIX "decl %0\n"
9206+ "int $4\n0:\n"
9207+ _ASM_EXTABLE(0b, 0b)
9208+#endif
9209+
9210+ : "+m" (v->counter));
9211+}
9212+
9213+/**
9214+ * atomic_inc_unchecked - increment atomic variable
9215+ * @v: pointer of type atomic_unchecked_t
9216+ *
9217+ * Atomically increments @v by 1.
9218+ */
9219+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9220+{
9221+ asm volatile(LOCK_PREFIX "incl %0\n"
9222 : "+m" (v->counter));
9223 }
9224
9225@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
9226 */
9227 static inline void atomic_dec(atomic_t *v)
9228 {
9229- asm volatile(LOCK_PREFIX "decl %0"
9230+ asm volatile(LOCK_PREFIX "decl %0\n"
9231+
9232+#ifdef CONFIG_PAX_REFCOUNT
9233+ "jno 0f\n"
9234+ LOCK_PREFIX "incl %0\n"
9235+ "int $4\n0:\n"
9236+ _ASM_EXTABLE(0b, 0b)
9237+#endif
9238+
9239+ : "+m" (v->counter));
9240+}
9241+
9242+/**
9243+ * atomic_dec_unchecked - decrement atomic variable
9244+ * @v: pointer of type atomic_unchecked_t
9245+ *
9246+ * Atomically decrements @v by 1.
9247+ */
9248+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9249+{
9250+ asm volatile(LOCK_PREFIX "decl %0\n"
9251 : "+m" (v->counter));
9252 }
9253
9254@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9255 {
9256 unsigned char c;
9257
9258- asm volatile(LOCK_PREFIX "decl %0; sete %1"
9259+ asm volatile(LOCK_PREFIX "decl %0\n"
9260+
9261+#ifdef CONFIG_PAX_REFCOUNT
9262+ "jno 0f\n"
9263+ LOCK_PREFIX "incl %0\n"
9264+ "int $4\n0:\n"
9265+ _ASM_EXTABLE(0b, 0b)
9266+#endif
9267+
9268+ "sete %1\n"
9269 : "+m" (v->counter), "=qm" (c)
9270 : : "memory");
9271 return c != 0;
9272@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9273 {
9274 unsigned char c;
9275
9276- asm volatile(LOCK_PREFIX "incl %0; sete %1"
9277+ asm volatile(LOCK_PREFIX "incl %0\n"
9278+
9279+#ifdef CONFIG_PAX_REFCOUNT
9280+ "jno 0f\n"
9281+ LOCK_PREFIX "decl %0\n"
9282+ "int $4\n0:\n"
9283+ _ASM_EXTABLE(0b, 0b)
9284+#endif
9285+
9286+ "sete %1\n"
9287+ : "+m" (v->counter), "=qm" (c)
9288+ : : "memory");
9289+ return c != 0;
9290+}
9291+
9292+/**
9293+ * atomic_inc_and_test_unchecked - increment and test
9294+ * @v: pointer of type atomic_unchecked_t
9295+ *
9296+ * Atomically increments @v by 1
9297+ * and returns true if the result is zero, or false for all
9298+ * other cases.
9299+ */
9300+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9301+{
9302+ unsigned char c;
9303+
9304+ asm volatile(LOCK_PREFIX "incl %0\n"
9305+ "sete %1\n"
9306 : "+m" (v->counter), "=qm" (c)
9307 : : "memory");
9308 return c != 0;
9309@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9310 {
9311 unsigned char c;
9312
9313- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9314+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
9315+
9316+#ifdef CONFIG_PAX_REFCOUNT
9317+ "jno 0f\n"
9318+ LOCK_PREFIX "subl %2,%0\n"
9319+ "int $4\n0:\n"
9320+ _ASM_EXTABLE(0b, 0b)
9321+#endif
9322+
9323+ "sets %1\n"
9324 : "+m" (v->counter), "=qm" (c)
9325 : "ir" (i) : "memory");
9326 return c;
9327@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
9328 goto no_xadd;
9329 #endif
9330 /* Modern 486+ processor */
9331- return i + xadd(&v->counter, i);
9332+ return i + xadd_check_overflow(&v->counter, i);
9333
9334 #ifdef CONFIG_M386
9335 no_xadd: /* Legacy 386 processor */
9336@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
9337 }
9338
9339 /**
9340+ * atomic_add_return_unchecked - add integer and return
9341+ * @i: integer value to add
9342+ * @v: pointer of type atomic_unchecked_t
9343+ *
9344+ * Atomically adds @i to @v and returns @i + @v
9345+ */
9346+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9347+{
9348+#ifdef CONFIG_M386
9349+ int __i;
9350+ unsigned long flags;
9351+ if (unlikely(boot_cpu_data.x86 <= 3))
9352+ goto no_xadd;
9353+#endif
9354+ /* Modern 486+ processor */
9355+ return i + xadd(&v->counter, i);
9356+
9357+#ifdef CONFIG_M386
9358+no_xadd: /* Legacy 386 processor */
9359+ raw_local_irq_save(flags);
9360+ __i = atomic_read_unchecked(v);
9361+ atomic_set_unchecked(v, i + __i);
9362+ raw_local_irq_restore(flags);
9363+ return i + __i;
9364+#endif
9365+}
9366+
9367+/**
9368 * atomic_sub_return - subtract integer and return
9369 * @v: pointer of type atomic_t
9370 * @i: integer value to subtract
9371@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9372 }
9373
9374 #define atomic_inc_return(v) (atomic_add_return(1, v))
9375+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9376+{
9377+ return atomic_add_return_unchecked(1, v);
9378+}
9379 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9380
9381 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9382@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9383 return cmpxchg(&v->counter, old, new);
9384 }
9385
9386+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9387+{
9388+ return cmpxchg(&v->counter, old, new);
9389+}
9390+
9391 static inline int atomic_xchg(atomic_t *v, int new)
9392 {
9393 return xchg(&v->counter, new);
9394 }
9395
9396+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9397+{
9398+ return xchg(&v->counter, new);
9399+}
9400+
9401 /**
9402 * __atomic_add_unless - add unless the number is already a given value
9403 * @v: pointer of type atomic_t
9404@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
9405 */
9406 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9407 {
9408- int c, old;
9409+ int c, old, new;
9410 c = atomic_read(v);
9411 for (;;) {
9412- if (unlikely(c == (u)))
9413+ if (unlikely(c == u))
9414 break;
9415- old = atomic_cmpxchg((v), c, c + (a));
9416+
9417+ asm volatile("addl %2,%0\n"
9418+
9419+#ifdef CONFIG_PAX_REFCOUNT
9420+ "jno 0f\n"
9421+ "subl %2,%0\n"
9422+ "int $4\n0:\n"
9423+ _ASM_EXTABLE(0b, 0b)
9424+#endif
9425+
9426+ : "=r" (new)
9427+ : "0" (c), "ir" (a));
9428+
9429+ old = atomic_cmpxchg(v, c, new);
9430 if (likely(old == c))
9431 break;
9432 c = old;
9433@@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9434 return c;
9435 }
9436
9437+/**
9438+ * atomic_inc_not_zero_hint - increment if not null
9439+ * @v: pointer of type atomic_t
9440+ * @hint: probable value of the atomic before the increment
9441+ *
9442+ * This version of atomic_inc_not_zero() gives a hint of probable
9443+ * value of the atomic. This helps processor to not read the memory
9444+ * before doing the atomic read/modify/write cycle, lowering
9445+ * number of bus transactions on some arches.
9446+ *
9447+ * Returns: 0 if increment was not done, 1 otherwise.
9448+ */
9449+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9450+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9451+{
9452+ int val, c = hint, new;
9453+
9454+ /* sanity test, should be removed by compiler if hint is a constant */
9455+ if (!hint)
9456+ return __atomic_add_unless(v, 1, 0);
9457+
9458+ do {
9459+ asm volatile("incl %0\n"
9460+
9461+#ifdef CONFIG_PAX_REFCOUNT
9462+ "jno 0f\n"
9463+ "decl %0\n"
9464+ "int $4\n0:\n"
9465+ _ASM_EXTABLE(0b, 0b)
9466+#endif
9467+
9468+ : "=r" (new)
9469+ : "0" (c));
9470+
9471+ val = atomic_cmpxchg(v, c, new);
9472+ if (val == c)
9473+ return 1;
9474+ c = val;
9475+ } while (c);
9476+
9477+ return 0;
9478+}
9479
9480 /*
9481 * atomic_dec_if_positive - decrement by 1 if old value positive
9482diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
9483index 1981199..36b9dfb 100644
9484--- a/arch/x86/include/asm/atomic64_32.h
9485+++ b/arch/x86/include/asm/atomic64_32.h
9486@@ -12,6 +12,14 @@ typedef struct {
9487 u64 __aligned(8) counter;
9488 } atomic64_t;
9489
9490+#ifdef CONFIG_PAX_REFCOUNT
9491+typedef struct {
9492+ u64 __aligned(8) counter;
9493+} atomic64_unchecked_t;
9494+#else
9495+typedef atomic64_t atomic64_unchecked_t;
9496+#endif
9497+
9498 #define ATOMIC64_INIT(val) { (val) }
9499
9500 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
9501@@ -37,21 +45,31 @@ typedef struct {
9502 ATOMIC64_DECL_ONE(sym##_386)
9503
9504 ATOMIC64_DECL_ONE(add_386);
9505+ATOMIC64_DECL_ONE(add_unchecked_386);
9506 ATOMIC64_DECL_ONE(sub_386);
9507+ATOMIC64_DECL_ONE(sub_unchecked_386);
9508 ATOMIC64_DECL_ONE(inc_386);
9509+ATOMIC64_DECL_ONE(inc_unchecked_386);
9510 ATOMIC64_DECL_ONE(dec_386);
9511+ATOMIC64_DECL_ONE(dec_unchecked_386);
9512 #endif
9513
9514 #define alternative_atomic64(f, out, in...) \
9515 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
9516
9517 ATOMIC64_DECL(read);
9518+ATOMIC64_DECL(read_unchecked);
9519 ATOMIC64_DECL(set);
9520+ATOMIC64_DECL(set_unchecked);
9521 ATOMIC64_DECL(xchg);
9522 ATOMIC64_DECL(add_return);
9523+ATOMIC64_DECL(add_return_unchecked);
9524 ATOMIC64_DECL(sub_return);
9525+ATOMIC64_DECL(sub_return_unchecked);
9526 ATOMIC64_DECL(inc_return);
9527+ATOMIC64_DECL(inc_return_unchecked);
9528 ATOMIC64_DECL(dec_return);
9529+ATOMIC64_DECL(dec_return_unchecked);
9530 ATOMIC64_DECL(dec_if_positive);
9531 ATOMIC64_DECL(inc_not_zero);
9532 ATOMIC64_DECL(add_unless);
9533@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
9534 }
9535
9536 /**
9537+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9538+ * @v: pointer to type atomic64_unchecked_t
9539+ * @o: expected value
9540+ * @n: new value
9541+ *
9542+ * Atomically sets @v to @n if it was equal to @o and returns
9543+ * the old value.
9544+ */
9545+
9546+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9547+{
9548+ return cmpxchg64(&v->counter, o, n);
9549+}
9550+
9551+/**
9552 * atomic64_xchg - xchg atomic64 variable
9553 * @v: pointer to type atomic64_t
9554 * @n: value to assign
9555@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
9556 }
9557
9558 /**
9559+ * atomic64_set_unchecked - set atomic64 variable
9560+ * @v: pointer to type atomic64_unchecked_t
9561+ * @i: value to assign
9562+ *
9563+ * Atomically sets the value of @v to @i.
9564+ */
9565+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9566+{
9567+ unsigned high = (unsigned)(i >> 32);
9568+ unsigned low = (unsigned)i;
9569+ alternative_atomic64(set, /* no output */,
9570+ "S" (v), "b" (low), "c" (high)
9571+ : "eax", "edx", "memory");
9572+}
9573+
9574+/**
9575 * atomic64_read - read atomic64 variable
9576 * @v: pointer to type atomic64_t
9577 *
9578@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
9579 }
9580
9581 /**
9582+ * atomic64_read_unchecked - read atomic64 variable
9583+ * @v: pointer to type atomic64_unchecked_t
9584+ *
9585+ * Atomically reads the value of @v and returns it.
9586+ */
9587+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9588+{
9589+ long long r;
9590+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
9591+ return r;
9592+ }
9593+
9594+/**
9595 * atomic64_add_return - add and return
9596 * @i: integer value to add
9597 * @v: pointer to type atomic64_t
9598@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
9599 return i;
9600 }
9601
9602+/**
9603+ * atomic64_add_return_unchecked - add and return
9604+ * @i: integer value to add
9605+ * @v: pointer to type atomic64_unchecked_t
9606+ *
9607+ * Atomically adds @i to @v and returns @i + *@v
9608+ */
9609+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9610+{
9611+ alternative_atomic64(add_return_unchecked,
9612+ ASM_OUTPUT2("+A" (i), "+c" (v)),
9613+ ASM_NO_INPUT_CLOBBER("memory"));
9614+ return i;
9615+}
9616+
9617 /*
9618 * Other variants with different arithmetic operators:
9619 */
9620@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
9621 return a;
9622 }
9623
9624+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9625+{
9626+ long long a;
9627+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
9628+ "S" (v) : "memory", "ecx");
9629+ return a;
9630+}
9631+
9632 static inline long long atomic64_dec_return(atomic64_t *v)
9633 {
9634 long long a;
9635@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
9636 }
9637
9638 /**
9639+ * atomic64_add_unchecked - add integer to atomic64 variable
9640+ * @i: integer value to add
9641+ * @v: pointer to type atomic64_unchecked_t
9642+ *
9643+ * Atomically adds @i to @v.
9644+ */
9645+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9646+{
9647+ __alternative_atomic64(add_unchecked, add_return_unchecked,
9648+ ASM_OUTPUT2("+A" (i), "+c" (v)),
9649+ ASM_NO_INPUT_CLOBBER("memory"));
9650+ return i;
9651+}
9652+
9653+/**
9654 * atomic64_sub - subtract the atomic64 variable
9655 * @i: integer value to subtract
9656 * @v: pointer to type atomic64_t
9657diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
9658index 0e1cbfc..5623683 100644
9659--- a/arch/x86/include/asm/atomic64_64.h
9660+++ b/arch/x86/include/asm/atomic64_64.h
9661@@ -18,7 +18,19 @@
9662 */
9663 static inline long atomic64_read(const atomic64_t *v)
9664 {
9665- return (*(volatile long *)&(v)->counter);
9666+ return (*(volatile const long *)&(v)->counter);
9667+}
9668+
9669+/**
9670+ * atomic64_read_unchecked - read atomic64 variable
9671+ * @v: pointer of type atomic64_unchecked_t
9672+ *
9673+ * Atomically reads the value of @v.
9674+ * Doesn't imply a read memory barrier.
9675+ */
9676+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9677+{
9678+ return (*(volatile const long *)&(v)->counter);
9679 }
9680
9681 /**
9682@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9683 }
9684
9685 /**
9686+ * atomic64_set_unchecked - set atomic64 variable
9687+ * @v: pointer to type atomic64_unchecked_t
9688+ * @i: required value
9689+ *
9690+ * Atomically sets the value of @v to @i.
9691+ */
9692+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9693+{
9694+ v->counter = i;
9695+}
9696+
9697+/**
9698 * atomic64_add - add integer to atomic64 variable
9699 * @i: integer value to add
9700 * @v: pointer to type atomic64_t
9701@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9702 */
9703 static inline void atomic64_add(long i, atomic64_t *v)
9704 {
9705+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
9706+
9707+#ifdef CONFIG_PAX_REFCOUNT
9708+ "jno 0f\n"
9709+ LOCK_PREFIX "subq %1,%0\n"
9710+ "int $4\n0:\n"
9711+ _ASM_EXTABLE(0b, 0b)
9712+#endif
9713+
9714+ : "=m" (v->counter)
9715+ : "er" (i), "m" (v->counter));
9716+}
9717+
9718+/**
9719+ * atomic64_add_unchecked - add integer to atomic64 variable
9720+ * @i: integer value to add
9721+ * @v: pointer to type atomic64_unchecked_t
9722+ *
9723+ * Atomically adds @i to @v.
9724+ */
9725+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9726+{
9727 asm volatile(LOCK_PREFIX "addq %1,%0"
9728 : "=m" (v->counter)
9729 : "er" (i), "m" (v->counter));
9730@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
9731 */
9732 static inline void atomic64_sub(long i, atomic64_t *v)
9733 {
9734- asm volatile(LOCK_PREFIX "subq %1,%0"
9735+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9736+
9737+#ifdef CONFIG_PAX_REFCOUNT
9738+ "jno 0f\n"
9739+ LOCK_PREFIX "addq %1,%0\n"
9740+ "int $4\n0:\n"
9741+ _ASM_EXTABLE(0b, 0b)
9742+#endif
9743+
9744+ : "=m" (v->counter)
9745+ : "er" (i), "m" (v->counter));
9746+}
9747+
9748+/**
9749+ * atomic64_sub_unchecked - subtract the atomic64 variable
9750+ * @i: integer value to subtract
9751+ * @v: pointer to type atomic64_unchecked_t
9752+ *
9753+ * Atomically subtracts @i from @v.
9754+ */
9755+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
9756+{
9757+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9758 : "=m" (v->counter)
9759 : "er" (i), "m" (v->counter));
9760 }
9761@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9762 {
9763 unsigned char c;
9764
9765- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9766+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
9767+
9768+#ifdef CONFIG_PAX_REFCOUNT
9769+ "jno 0f\n"
9770+ LOCK_PREFIX "addq %2,%0\n"
9771+ "int $4\n0:\n"
9772+ _ASM_EXTABLE(0b, 0b)
9773+#endif
9774+
9775+ "sete %1\n"
9776 : "=m" (v->counter), "=qm" (c)
9777 : "er" (i), "m" (v->counter) : "memory");
9778 return c;
9779@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9780 */
9781 static inline void atomic64_inc(atomic64_t *v)
9782 {
9783+ asm volatile(LOCK_PREFIX "incq %0\n"
9784+
9785+#ifdef CONFIG_PAX_REFCOUNT
9786+ "jno 0f\n"
9787+ LOCK_PREFIX "decq %0\n"
9788+ "int $4\n0:\n"
9789+ _ASM_EXTABLE(0b, 0b)
9790+#endif
9791+
9792+ : "=m" (v->counter)
9793+ : "m" (v->counter));
9794+}
9795+
9796+/**
9797+ * atomic64_inc_unchecked - increment atomic64 variable
9798+ * @v: pointer to type atomic64_unchecked_t
9799+ *
9800+ * Atomically increments @v by 1.
9801+ */
9802+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9803+{
9804 asm volatile(LOCK_PREFIX "incq %0"
9805 : "=m" (v->counter)
9806 : "m" (v->counter));
9807@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
9808 */
9809 static inline void atomic64_dec(atomic64_t *v)
9810 {
9811- asm volatile(LOCK_PREFIX "decq %0"
9812+ asm volatile(LOCK_PREFIX "decq %0\n"
9813+
9814+#ifdef CONFIG_PAX_REFCOUNT
9815+ "jno 0f\n"
9816+ LOCK_PREFIX "incq %0\n"
9817+ "int $4\n0:\n"
9818+ _ASM_EXTABLE(0b, 0b)
9819+#endif
9820+
9821+ : "=m" (v->counter)
9822+ : "m" (v->counter));
9823+}
9824+
9825+/**
9826+ * atomic64_dec_unchecked - decrement atomic64 variable
9827+ * @v: pointer to type atomic64_unchecked_t
9828+ *
9829+ * Atomically decrements @v by 1.
9830+ */
9831+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9832+{
9833+ asm volatile(LOCK_PREFIX "decq %0\n"
9834 : "=m" (v->counter)
9835 : "m" (v->counter));
9836 }
9837@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9838 {
9839 unsigned char c;
9840
9841- asm volatile(LOCK_PREFIX "decq %0; sete %1"
9842+ asm volatile(LOCK_PREFIX "decq %0\n"
9843+
9844+#ifdef CONFIG_PAX_REFCOUNT
9845+ "jno 0f\n"
9846+ LOCK_PREFIX "incq %0\n"
9847+ "int $4\n0:\n"
9848+ _ASM_EXTABLE(0b, 0b)
9849+#endif
9850+
9851+ "sete %1\n"
9852 : "=m" (v->counter), "=qm" (c)
9853 : "m" (v->counter) : "memory");
9854 return c != 0;
9855@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9856 {
9857 unsigned char c;
9858
9859- asm volatile(LOCK_PREFIX "incq %0; sete %1"
9860+ asm volatile(LOCK_PREFIX "incq %0\n"
9861+
9862+#ifdef CONFIG_PAX_REFCOUNT
9863+ "jno 0f\n"
9864+ LOCK_PREFIX "decq %0\n"
9865+ "int $4\n0:\n"
9866+ _ASM_EXTABLE(0b, 0b)
9867+#endif
9868+
9869+ "sete %1\n"
9870 : "=m" (v->counter), "=qm" (c)
9871 : "m" (v->counter) : "memory");
9872 return c != 0;
9873@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9874 {
9875 unsigned char c;
9876
9877- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9878+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
9879+
9880+#ifdef CONFIG_PAX_REFCOUNT
9881+ "jno 0f\n"
9882+ LOCK_PREFIX "subq %2,%0\n"
9883+ "int $4\n0:\n"
9884+ _ASM_EXTABLE(0b, 0b)
9885+#endif
9886+
9887+ "sets %1\n"
9888 : "=m" (v->counter), "=qm" (c)
9889 : "er" (i), "m" (v->counter) : "memory");
9890 return c;
9891@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9892 */
9893 static inline long atomic64_add_return(long i, atomic64_t *v)
9894 {
9895+ return i + xadd_check_overflow(&v->counter, i);
9896+}
9897+
9898+/**
9899+ * atomic64_add_return_unchecked - add and return
9900+ * @i: integer value to add
9901+ * @v: pointer to type atomic64_unchecked_t
9902+ *
9903+ * Atomically adds @i to @v and returns @i + @v
9904+ */
9905+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9906+{
9907 return i + xadd(&v->counter, i);
9908 }
9909
9910@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9911 }
9912
9913 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9914+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9915+{
9916+ return atomic64_add_return_unchecked(1, v);
9917+}
9918 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9919
9920 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9921@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9922 return cmpxchg(&v->counter, old, new);
9923 }
9924
9925+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9926+{
9927+ return cmpxchg(&v->counter, old, new);
9928+}
9929+
9930 static inline long atomic64_xchg(atomic64_t *v, long new)
9931 {
9932 return xchg(&v->counter, new);
9933@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
9934 */
9935 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9936 {
9937- long c, old;
9938+ long c, old, new;
9939 c = atomic64_read(v);
9940 for (;;) {
9941- if (unlikely(c == (u)))
9942+ if (unlikely(c == u))
9943 break;
9944- old = atomic64_cmpxchg((v), c, c + (a));
9945+
9946+ asm volatile("add %2,%0\n"
9947+
9948+#ifdef CONFIG_PAX_REFCOUNT
9949+ "jno 0f\n"
9950+ "sub %2,%0\n"
9951+ "int $4\n0:\n"
9952+ _ASM_EXTABLE(0b, 0b)
9953+#endif
9954+
9955+ : "=r" (new)
9956+ : "0" (c), "ir" (a));
9957+
9958+ old = atomic64_cmpxchg(v, c, new);
9959 if (likely(old == c))
9960 break;
9961 c = old;
9962 }
9963- return c != (u);
9964+ return c != u;
9965 }
9966
9967 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9968diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9969index b97596e..9bd48b06 100644
9970--- a/arch/x86/include/asm/bitops.h
9971+++ b/arch/x86/include/asm/bitops.h
9972@@ -38,7 +38,7 @@
9973 * a mask operation on a byte.
9974 */
9975 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9976-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9977+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9978 #define CONST_MASK(nr) (1 << ((nr) & 7))
9979
9980 /**
9981diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9982index 5e1a2ee..c9f9533 100644
9983--- a/arch/x86/include/asm/boot.h
9984+++ b/arch/x86/include/asm/boot.h
9985@@ -11,10 +11,15 @@
9986 #include <asm/pgtable_types.h>
9987
9988 /* Physical address where kernel should be loaded. */
9989-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9990+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9991 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9992 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9993
9994+#ifndef __ASSEMBLY__
9995+extern unsigned char __LOAD_PHYSICAL_ADDR[];
9996+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9997+#endif
9998+
9999 /* Minimum kernel alignment, as a power of two */
10000 #ifdef CONFIG_X86_64
10001 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
10002diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
10003index 48f99f1..d78ebf9 100644
10004--- a/arch/x86/include/asm/cache.h
10005+++ b/arch/x86/include/asm/cache.h
10006@@ -5,12 +5,13 @@
10007
10008 /* L1 cache line size */
10009 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10010-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10011+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10012
10013 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
10014+#define __read_only __attribute__((__section__(".data..read_only")))
10015
10016 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
10017-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
10018+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
10019
10020 #ifdef CONFIG_X86_VSMP
10021 #ifdef CONFIG_SMP
10022diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
10023index 9863ee3..4a1f8e1 100644
10024--- a/arch/x86/include/asm/cacheflush.h
10025+++ b/arch/x86/include/asm/cacheflush.h
10026@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
10027 unsigned long pg_flags = pg->flags & _PGMT_MASK;
10028
10029 if (pg_flags == _PGMT_DEFAULT)
10030- return -1;
10031+ return ~0UL;
10032 else if (pg_flags == _PGMT_WC)
10033 return _PAGE_CACHE_WC;
10034 else if (pg_flags == _PGMT_UC_MINUS)
10035diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
10036index 46fc474..b02b0f9 100644
10037--- a/arch/x86/include/asm/checksum_32.h
10038+++ b/arch/x86/include/asm/checksum_32.h
10039@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
10040 int len, __wsum sum,
10041 int *src_err_ptr, int *dst_err_ptr);
10042
10043+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
10044+ int len, __wsum sum,
10045+ int *src_err_ptr, int *dst_err_ptr);
10046+
10047+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
10048+ int len, __wsum sum,
10049+ int *src_err_ptr, int *dst_err_ptr);
10050+
10051 /*
10052 * Note: when you get a NULL pointer exception here this means someone
10053 * passed in an incorrect kernel address to one of these functions.
10054@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
10055 int *err_ptr)
10056 {
10057 might_sleep();
10058- return csum_partial_copy_generic((__force void *)src, dst,
10059+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
10060 len, sum, err_ptr, NULL);
10061 }
10062
10063@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
10064 {
10065 might_sleep();
10066 if (access_ok(VERIFY_WRITE, dst, len))
10067- return csum_partial_copy_generic(src, (__force void *)dst,
10068+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
10069 len, sum, NULL, err_ptr);
10070
10071 if (len)
10072diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
10073index 99480e5..d81165b 100644
10074--- a/arch/x86/include/asm/cmpxchg.h
10075+++ b/arch/x86/include/asm/cmpxchg.h
10076@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
10077 __compiletime_error("Bad argument size for cmpxchg");
10078 extern void __xadd_wrong_size(void)
10079 __compiletime_error("Bad argument size for xadd");
10080+extern void __xadd_check_overflow_wrong_size(void)
10081+ __compiletime_error("Bad argument size for xadd_check_overflow");
10082 extern void __add_wrong_size(void)
10083 __compiletime_error("Bad argument size for add");
10084+extern void __add_check_overflow_wrong_size(void)
10085+ __compiletime_error("Bad argument size for add_check_overflow");
10086
10087 /*
10088 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
10089@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
10090 __ret; \
10091 })
10092
10093+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
10094+ ({ \
10095+ __typeof__ (*(ptr)) __ret = (arg); \
10096+ switch (sizeof(*(ptr))) { \
10097+ case __X86_CASE_L: \
10098+ asm volatile (lock #op "l %0, %1\n" \
10099+ "jno 0f\n" \
10100+ "mov %0,%1\n" \
10101+ "int $4\n0:\n" \
10102+ _ASM_EXTABLE(0b, 0b) \
10103+ : "+r" (__ret), "+m" (*(ptr)) \
10104+ : : "memory", "cc"); \
10105+ break; \
10106+ case __X86_CASE_Q: \
10107+ asm volatile (lock #op "q %q0, %1\n" \
10108+ "jno 0f\n" \
10109+ "mov %0,%1\n" \
10110+ "int $4\n0:\n" \
10111+ _ASM_EXTABLE(0b, 0b) \
10112+ : "+r" (__ret), "+m" (*(ptr)) \
10113+ : : "memory", "cc"); \
10114+ break; \
10115+ default: \
10116+ __ ## op ## _check_overflow_wrong_size(); \
10117+ } \
10118+ __ret; \
10119+ })
10120+
10121 /*
10122 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
10123 * Since this is generally used to protect other memory information, we
10124@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
10125 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
10126 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
10127
10128+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
10129+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
10130+
10131 #define __add(ptr, inc, lock) \
10132 ({ \
10133 __typeof__ (*(ptr)) __ret = (inc); \
10134diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
10135index 340ee49..4238ced 100644
10136--- a/arch/x86/include/asm/cpufeature.h
10137+++ b/arch/x86/include/asm/cpufeature.h
10138@@ -371,7 +371,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
10139 ".section .discard,\"aw\",@progbits\n"
10140 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
10141 ".previous\n"
10142- ".section .altinstr_replacement,\"ax\"\n"
10143+ ".section .altinstr_replacement,\"a\"\n"
10144 "3: movb $1,%0\n"
10145 "4:\n"
10146 ".previous\n"
10147diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
10148index e95822d..a90010e 100644
10149--- a/arch/x86/include/asm/desc.h
10150+++ b/arch/x86/include/asm/desc.h
10151@@ -4,6 +4,7 @@
10152 #include <asm/desc_defs.h>
10153 #include <asm/ldt.h>
10154 #include <asm/mmu.h>
10155+#include <asm/pgtable.h>
10156
10157 #include <linux/smp.h>
10158
10159@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10160
10161 desc->type = (info->read_exec_only ^ 1) << 1;
10162 desc->type |= info->contents << 2;
10163+ desc->type |= info->seg_not_present ^ 1;
10164
10165 desc->s = 1;
10166 desc->dpl = 0x3;
10167@@ -34,19 +36,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10168 }
10169
10170 extern struct desc_ptr idt_descr;
10171-extern gate_desc idt_table[];
10172 extern struct desc_ptr nmi_idt_descr;
10173-extern gate_desc nmi_idt_table[];
10174-
10175-struct gdt_page {
10176- struct desc_struct gdt[GDT_ENTRIES];
10177-} __attribute__((aligned(PAGE_SIZE)));
10178-
10179-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
10180+extern gate_desc idt_table[256];
10181+extern gate_desc nmi_idt_table[256];
10182
10183+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
10184 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
10185 {
10186- return per_cpu(gdt_page, cpu).gdt;
10187+ return cpu_gdt_table[cpu];
10188 }
10189
10190 #ifdef CONFIG_X86_64
10191@@ -71,8 +68,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
10192 unsigned long base, unsigned dpl, unsigned flags,
10193 unsigned short seg)
10194 {
10195- gate->a = (seg << 16) | (base & 0xffff);
10196- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10197+ gate->gate.offset_low = base;
10198+ gate->gate.seg = seg;
10199+ gate->gate.reserved = 0;
10200+ gate->gate.type = type;
10201+ gate->gate.s = 0;
10202+ gate->gate.dpl = dpl;
10203+ gate->gate.p = 1;
10204+ gate->gate.offset_high = base >> 16;
10205 }
10206
10207 #endif
10208@@ -117,12 +120,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
10209
10210 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
10211 {
10212+ pax_open_kernel();
10213 memcpy(&idt[entry], gate, sizeof(*gate));
10214+ pax_close_kernel();
10215 }
10216
10217 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
10218 {
10219+ pax_open_kernel();
10220 memcpy(&ldt[entry], desc, 8);
10221+ pax_close_kernel();
10222 }
10223
10224 static inline void
10225@@ -136,7 +143,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
10226 default: size = sizeof(*gdt); break;
10227 }
10228
10229+ pax_open_kernel();
10230 memcpy(&gdt[entry], desc, size);
10231+ pax_close_kernel();
10232 }
10233
10234 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10235@@ -209,7 +218,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10236
10237 static inline void native_load_tr_desc(void)
10238 {
10239+ pax_open_kernel();
10240 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10241+ pax_close_kernel();
10242 }
10243
10244 static inline void native_load_gdt(const struct desc_ptr *dtr)
10245@@ -246,8 +257,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10246 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10247 unsigned int i;
10248
10249+ pax_open_kernel();
10250 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10251 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10252+ pax_close_kernel();
10253 }
10254
10255 #define _LDT_empty(info) \
10256@@ -310,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10257 }
10258
10259 #ifdef CONFIG_X86_64
10260-static inline void set_nmi_gate(int gate, void *addr)
10261+static inline void set_nmi_gate(int gate, const void *addr)
10262 {
10263 gate_desc s;
10264
10265@@ -319,7 +332,7 @@ static inline void set_nmi_gate(int gate, void *addr)
10266 }
10267 #endif
10268
10269-static inline void _set_gate(int gate, unsigned type, void *addr,
10270+static inline void _set_gate(int gate, unsigned type, const void *addr,
10271 unsigned dpl, unsigned ist, unsigned seg)
10272 {
10273 gate_desc s;
10274@@ -338,7 +351,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10275 * Pentium F0 0F bugfix can have resulted in the mapped
10276 * IDT being write-protected.
10277 */
10278-static inline void set_intr_gate(unsigned int n, void *addr)
10279+static inline void set_intr_gate(unsigned int n, const void *addr)
10280 {
10281 BUG_ON((unsigned)n > 0xFF);
10282 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10283@@ -368,19 +381,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10284 /*
10285 * This routine sets up an interrupt gate at directory privilege level 3.
10286 */
10287-static inline void set_system_intr_gate(unsigned int n, void *addr)
10288+static inline void set_system_intr_gate(unsigned int n, const void *addr)
10289 {
10290 BUG_ON((unsigned)n > 0xFF);
10291 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10292 }
10293
10294-static inline void set_system_trap_gate(unsigned int n, void *addr)
10295+static inline void set_system_trap_gate(unsigned int n, const void *addr)
10296 {
10297 BUG_ON((unsigned)n > 0xFF);
10298 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10299 }
10300
10301-static inline void set_trap_gate(unsigned int n, void *addr)
10302+static inline void set_trap_gate(unsigned int n, const void *addr)
10303 {
10304 BUG_ON((unsigned)n > 0xFF);
10305 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10306@@ -389,19 +402,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10307 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10308 {
10309 BUG_ON((unsigned)n > 0xFF);
10310- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10311+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10312 }
10313
10314-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10315+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10316 {
10317 BUG_ON((unsigned)n > 0xFF);
10318 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10319 }
10320
10321-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10322+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10323 {
10324 BUG_ON((unsigned)n > 0xFF);
10325 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10326 }
10327
10328+#ifdef CONFIG_X86_32
10329+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10330+{
10331+ struct desc_struct d;
10332+
10333+ if (likely(limit))
10334+ limit = (limit - 1UL) >> PAGE_SHIFT;
10335+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
10336+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10337+}
10338+#endif
10339+
10340 #endif /* _ASM_X86_DESC_H */
10341diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10342index 278441f..b95a174 100644
10343--- a/arch/x86/include/asm/desc_defs.h
10344+++ b/arch/x86/include/asm/desc_defs.h
10345@@ -31,6 +31,12 @@ struct desc_struct {
10346 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10347 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10348 };
10349+ struct {
10350+ u16 offset_low;
10351+ u16 seg;
10352+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10353+ unsigned offset_high: 16;
10354+ } gate;
10355 };
10356 } __attribute__((packed));
10357
10358diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
10359index 3778256..c5d4fce 100644
10360--- a/arch/x86/include/asm/e820.h
10361+++ b/arch/x86/include/asm/e820.h
10362@@ -69,7 +69,7 @@ struct e820map {
10363 #define ISA_START_ADDRESS 0xa0000
10364 #define ISA_END_ADDRESS 0x100000
10365
10366-#define BIOS_BEGIN 0x000a0000
10367+#define BIOS_BEGIN 0x000c0000
10368 #define BIOS_END 0x00100000
10369
10370 #define BIOS_ROM_BASE 0xffe00000
10371diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
10372index 5939f44..f8845f6 100644
10373--- a/arch/x86/include/asm/elf.h
10374+++ b/arch/x86/include/asm/elf.h
10375@@ -243,7 +243,25 @@ extern int force_personality32;
10376 the loader. We need to make sure that it is out of the way of the program
10377 that it will "exec", and that there is sufficient room for the brk. */
10378
10379+#ifdef CONFIG_PAX_SEGMEXEC
10380+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10381+#else
10382 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10383+#endif
10384+
10385+#ifdef CONFIG_PAX_ASLR
10386+#ifdef CONFIG_X86_32
10387+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10388+
10389+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10390+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10391+#else
10392+#define PAX_ELF_ET_DYN_BASE 0x400000UL
10393+
10394+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10395+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10396+#endif
10397+#endif
10398
10399 /* This yields a mask that user programs can use to figure out what
10400 instruction set this CPU supports. This could be done in user space,
10401@@ -296,16 +314,12 @@ do { \
10402
10403 #define ARCH_DLINFO \
10404 do { \
10405- if (vdso_enabled) \
10406- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10407- (unsigned long)current->mm->context.vdso); \
10408+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10409 } while (0)
10410
10411 #define ARCH_DLINFO_X32 \
10412 do { \
10413- if (vdso_enabled) \
10414- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10415- (unsigned long)current->mm->context.vdso); \
10416+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10417 } while (0)
10418
10419 #define AT_SYSINFO 32
10420@@ -320,7 +334,7 @@ else \
10421
10422 #endif /* !CONFIG_X86_32 */
10423
10424-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10425+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10426
10427 #define VDSO_ENTRY \
10428 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10429@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
10430 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10431 #define compat_arch_setup_additional_pages syscall32_setup_pages
10432
10433-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10434-#define arch_randomize_brk arch_randomize_brk
10435-
10436 /*
10437 * True on X86_32 or when emulating IA32 on X86_64
10438 */
10439diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10440index cc70c1c..d96d011 100644
10441--- a/arch/x86/include/asm/emergency-restart.h
10442+++ b/arch/x86/include/asm/emergency-restart.h
10443@@ -15,6 +15,6 @@ enum reboot_type {
10444
10445 extern enum reboot_type reboot_type;
10446
10447-extern void machine_emergency_restart(void);
10448+extern void machine_emergency_restart(void) __noreturn;
10449
10450 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10451diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
10452index 4fa8815..71b121a 100644
10453--- a/arch/x86/include/asm/fpu-internal.h
10454+++ b/arch/x86/include/asm/fpu-internal.h
10455@@ -86,6 +86,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10456 {
10457 int err;
10458
10459+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10460+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10461+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
10462+#endif
10463+
10464 /* See comment in fxsave() below. */
10465 #ifdef CONFIG_AS_FXSAVEQ
10466 asm volatile("1: fxrstorq %[fx]\n\t"
10467@@ -115,6 +120,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10468 {
10469 int err;
10470
10471+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10472+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10473+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10474+#endif
10475+
10476 /*
10477 * Clear the bytes not touched by the fxsave and reserved
10478 * for the SW usage.
10479@@ -271,7 +281,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
10480 "emms\n\t" /* clear stack tags */
10481 "fildl %P[addr]", /* set F?P to defined value */
10482 X86_FEATURE_FXSAVE_LEAK,
10483- [addr] "m" (tsk->thread.fpu.has_fpu));
10484+ [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
10485
10486 return fpu_restore_checking(&tsk->thread.fpu);
10487 }
10488diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10489index 71ecbcb..bac10b7 100644
10490--- a/arch/x86/include/asm/futex.h
10491+++ b/arch/x86/include/asm/futex.h
10492@@ -11,16 +11,18 @@
10493 #include <asm/processor.h>
10494
10495 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10496+ typecheck(u32 __user *, uaddr); \
10497 asm volatile("1:\t" insn "\n" \
10498 "2:\t.section .fixup,\"ax\"\n" \
10499 "3:\tmov\t%3, %1\n" \
10500 "\tjmp\t2b\n" \
10501 "\t.previous\n" \
10502 _ASM_EXTABLE(1b, 3b) \
10503- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10504+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10505 : "i" (-EFAULT), "0" (oparg), "1" (0))
10506
10507 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10508+ typecheck(u32 __user *, uaddr); \
10509 asm volatile("1:\tmovl %2, %0\n" \
10510 "\tmovl\t%0, %3\n" \
10511 "\t" insn "\n" \
10512@@ -33,7 +35,7 @@
10513 _ASM_EXTABLE(1b, 4b) \
10514 _ASM_EXTABLE(2b, 4b) \
10515 : "=&a" (oldval), "=&r" (ret), \
10516- "+m" (*uaddr), "=&r" (tem) \
10517+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10518 : "r" (oparg), "i" (-EFAULT), "1" (0))
10519
10520 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10521@@ -60,10 +62,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10522
10523 switch (op) {
10524 case FUTEX_OP_SET:
10525- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10526+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10527 break;
10528 case FUTEX_OP_ADD:
10529- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10530+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10531 uaddr, oparg);
10532 break;
10533 case FUTEX_OP_OR:
10534@@ -122,13 +124,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10535 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10536 return -EFAULT;
10537
10538- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10539+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
10540 "2:\t.section .fixup, \"ax\"\n"
10541 "3:\tmov %3, %0\n"
10542 "\tjmp 2b\n"
10543 "\t.previous\n"
10544 _ASM_EXTABLE(1b, 3b)
10545- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
10546+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
10547 : "i" (-EFAULT), "r" (newval), "1" (oldval)
10548 : "memory"
10549 );
10550diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10551index eb92a6e..b98b2f4 100644
10552--- a/arch/x86/include/asm/hw_irq.h
10553+++ b/arch/x86/include/asm/hw_irq.h
10554@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
10555 extern void enable_IO_APIC(void);
10556
10557 /* Statistics */
10558-extern atomic_t irq_err_count;
10559-extern atomic_t irq_mis_count;
10560+extern atomic_unchecked_t irq_err_count;
10561+extern atomic_unchecked_t irq_mis_count;
10562
10563 /* EISA */
10564 extern void eisa_set_level_irq(unsigned int irq);
10565diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10566index d8e8eef..99f81ae 100644
10567--- a/arch/x86/include/asm/io.h
10568+++ b/arch/x86/include/asm/io.h
10569@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
10570
10571 #include <linux/vmalloc.h>
10572
10573+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10574+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10575+{
10576+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10577+}
10578+
10579+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10580+{
10581+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10582+}
10583+
10584 /*
10585 * Convert a virtual cached pointer to an uncached pointer
10586 */
10587diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10588index bba3cf8..06bc8da 100644
10589--- a/arch/x86/include/asm/irqflags.h
10590+++ b/arch/x86/include/asm/irqflags.h
10591@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
10592 sti; \
10593 sysexit
10594
10595+#define GET_CR0_INTO_RDI mov %cr0, %rdi
10596+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10597+#define GET_CR3_INTO_RDI mov %cr3, %rdi
10598+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10599+
10600 #else
10601 #define INTERRUPT_RETURN iret
10602 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10603diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10604index 5478825..839e88c 100644
10605--- a/arch/x86/include/asm/kprobes.h
10606+++ b/arch/x86/include/asm/kprobes.h
10607@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10608 #define RELATIVEJUMP_SIZE 5
10609 #define RELATIVECALL_OPCODE 0xe8
10610 #define RELATIVE_ADDR_SIZE 4
10611-#define MAX_STACK_SIZE 64
10612-#define MIN_STACK_SIZE(ADDR) \
10613- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10614- THREAD_SIZE - (unsigned long)(ADDR))) \
10615- ? (MAX_STACK_SIZE) \
10616- : (((unsigned long)current_thread_info()) + \
10617- THREAD_SIZE - (unsigned long)(ADDR)))
10618+#define MAX_STACK_SIZE 64UL
10619+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10620
10621 #define flush_insn_slot(p) do { } while (0)
10622
10623diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10624index e216ba0..453f6ec 100644
10625--- a/arch/x86/include/asm/kvm_host.h
10626+++ b/arch/x86/include/asm/kvm_host.h
10627@@ -679,7 +679,7 @@ struct kvm_x86_ops {
10628 int (*check_intercept)(struct kvm_vcpu *vcpu,
10629 struct x86_instruction_info *info,
10630 enum x86_intercept_stage stage);
10631-};
10632+} __do_const;
10633
10634 struct kvm_arch_async_pf {
10635 u32 token;
10636diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10637index c8bed0d..e5721fa 100644
10638--- a/arch/x86/include/asm/local.h
10639+++ b/arch/x86/include/asm/local.h
10640@@ -17,26 +17,58 @@ typedef struct {
10641
10642 static inline void local_inc(local_t *l)
10643 {
10644- asm volatile(_ASM_INC "%0"
10645+ asm volatile(_ASM_INC "%0\n"
10646+
10647+#ifdef CONFIG_PAX_REFCOUNT
10648+ "jno 0f\n"
10649+ _ASM_DEC "%0\n"
10650+ "int $4\n0:\n"
10651+ _ASM_EXTABLE(0b, 0b)
10652+#endif
10653+
10654 : "+m" (l->a.counter));
10655 }
10656
10657 static inline void local_dec(local_t *l)
10658 {
10659- asm volatile(_ASM_DEC "%0"
10660+ asm volatile(_ASM_DEC "%0\n"
10661+
10662+#ifdef CONFIG_PAX_REFCOUNT
10663+ "jno 0f\n"
10664+ _ASM_INC "%0\n"
10665+ "int $4\n0:\n"
10666+ _ASM_EXTABLE(0b, 0b)
10667+#endif
10668+
10669 : "+m" (l->a.counter));
10670 }
10671
10672 static inline void local_add(long i, local_t *l)
10673 {
10674- asm volatile(_ASM_ADD "%1,%0"
10675+ asm volatile(_ASM_ADD "%1,%0\n"
10676+
10677+#ifdef CONFIG_PAX_REFCOUNT
10678+ "jno 0f\n"
10679+ _ASM_SUB "%1,%0\n"
10680+ "int $4\n0:\n"
10681+ _ASM_EXTABLE(0b, 0b)
10682+#endif
10683+
10684 : "+m" (l->a.counter)
10685 : "ir" (i));
10686 }
10687
10688 static inline void local_sub(long i, local_t *l)
10689 {
10690- asm volatile(_ASM_SUB "%1,%0"
10691+ asm volatile(_ASM_SUB "%1,%0\n"
10692+
10693+#ifdef CONFIG_PAX_REFCOUNT
10694+ "jno 0f\n"
10695+ _ASM_ADD "%1,%0\n"
10696+ "int $4\n0:\n"
10697+ _ASM_EXTABLE(0b, 0b)
10698+#endif
10699+
10700 : "+m" (l->a.counter)
10701 : "ir" (i));
10702 }
10703@@ -54,7 +86,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10704 {
10705 unsigned char c;
10706
10707- asm volatile(_ASM_SUB "%2,%0; sete %1"
10708+ asm volatile(_ASM_SUB "%2,%0\n"
10709+
10710+#ifdef CONFIG_PAX_REFCOUNT
10711+ "jno 0f\n"
10712+ _ASM_ADD "%2,%0\n"
10713+ "int $4\n0:\n"
10714+ _ASM_EXTABLE(0b, 0b)
10715+#endif
10716+
10717+ "sete %1\n"
10718 : "+m" (l->a.counter), "=qm" (c)
10719 : "ir" (i) : "memory");
10720 return c;
10721@@ -72,7 +113,16 @@ static inline int local_dec_and_test(local_t *l)
10722 {
10723 unsigned char c;
10724
10725- asm volatile(_ASM_DEC "%0; sete %1"
10726+ asm volatile(_ASM_DEC "%0\n"
10727+
10728+#ifdef CONFIG_PAX_REFCOUNT
10729+ "jno 0f\n"
10730+ _ASM_INC "%0\n"
10731+ "int $4\n0:\n"
10732+ _ASM_EXTABLE(0b, 0b)
10733+#endif
10734+
10735+ "sete %1\n"
10736 : "+m" (l->a.counter), "=qm" (c)
10737 : : "memory");
10738 return c != 0;
10739@@ -90,7 +140,16 @@ static inline int local_inc_and_test(local_t *l)
10740 {
10741 unsigned char c;
10742
10743- asm volatile(_ASM_INC "%0; sete %1"
10744+ asm volatile(_ASM_INC "%0\n"
10745+
10746+#ifdef CONFIG_PAX_REFCOUNT
10747+ "jno 0f\n"
10748+ _ASM_DEC "%0\n"
10749+ "int $4\n0:\n"
10750+ _ASM_EXTABLE(0b, 0b)
10751+#endif
10752+
10753+ "sete %1\n"
10754 : "+m" (l->a.counter), "=qm" (c)
10755 : : "memory");
10756 return c != 0;
10757@@ -109,7 +168,16 @@ static inline int local_add_negative(long i, local_t *l)
10758 {
10759 unsigned char c;
10760
10761- asm volatile(_ASM_ADD "%2,%0; sets %1"
10762+ asm volatile(_ASM_ADD "%2,%0\n"
10763+
10764+#ifdef CONFIG_PAX_REFCOUNT
10765+ "jno 0f\n"
10766+ _ASM_SUB "%2,%0\n"
10767+ "int $4\n0:\n"
10768+ _ASM_EXTABLE(0b, 0b)
10769+#endif
10770+
10771+ "sets %1\n"
10772 : "+m" (l->a.counter), "=qm" (c)
10773 : "ir" (i) : "memory");
10774 return c;
10775@@ -132,7 +200,15 @@ static inline long local_add_return(long i, local_t *l)
10776 #endif
10777 /* Modern 486+ processor */
10778 __i = i;
10779- asm volatile(_ASM_XADD "%0, %1;"
10780+ asm volatile(_ASM_XADD "%0, %1\n"
10781+
10782+#ifdef CONFIG_PAX_REFCOUNT
10783+ "jno 0f\n"
10784+ _ASM_MOV "%0,%1\n"
10785+ "int $4\n0:\n"
10786+ _ASM_EXTABLE(0b, 0b)
10787+#endif
10788+
10789 : "+r" (i), "+m" (l->a.counter)
10790 : : "memory");
10791 return i + __i;
10792diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10793index 593e51d..fa69c9a 100644
10794--- a/arch/x86/include/asm/mman.h
10795+++ b/arch/x86/include/asm/mman.h
10796@@ -5,4 +5,14 @@
10797
10798 #include <asm-generic/mman.h>
10799
10800+#ifdef __KERNEL__
10801+#ifndef __ASSEMBLY__
10802+#ifdef CONFIG_X86_32
10803+#define arch_mmap_check i386_mmap_check
10804+int i386_mmap_check(unsigned long addr, unsigned long len,
10805+ unsigned long flags);
10806+#endif
10807+#endif
10808+#endif
10809+
10810 #endif /* _ASM_X86_MMAN_H */
10811diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10812index 5f55e69..e20bfb1 100644
10813--- a/arch/x86/include/asm/mmu.h
10814+++ b/arch/x86/include/asm/mmu.h
10815@@ -9,7 +9,7 @@
10816 * we put the segment information here.
10817 */
10818 typedef struct {
10819- void *ldt;
10820+ struct desc_struct *ldt;
10821 int size;
10822
10823 #ifdef CONFIG_X86_64
10824@@ -18,7 +18,19 @@ typedef struct {
10825 #endif
10826
10827 struct mutex lock;
10828- void *vdso;
10829+ unsigned long vdso;
10830+
10831+#ifdef CONFIG_X86_32
10832+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10833+ unsigned long user_cs_base;
10834+ unsigned long user_cs_limit;
10835+
10836+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10837+ cpumask_t cpu_user_cs_mask;
10838+#endif
10839+
10840+#endif
10841+#endif
10842 } mm_context_t;
10843
10844 #ifdef CONFIG_SMP
10845diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10846index 6902152..da4283a 100644
10847--- a/arch/x86/include/asm/mmu_context.h
10848+++ b/arch/x86/include/asm/mmu_context.h
10849@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10850
10851 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10852 {
10853+
10854+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10855+ unsigned int i;
10856+ pgd_t *pgd;
10857+
10858+ pax_open_kernel();
10859+ pgd = get_cpu_pgd(smp_processor_id());
10860+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10861+ set_pgd_batched(pgd+i, native_make_pgd(0));
10862+ pax_close_kernel();
10863+#endif
10864+
10865 #ifdef CONFIG_SMP
10866 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10867 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10868@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10869 struct task_struct *tsk)
10870 {
10871 unsigned cpu = smp_processor_id();
10872+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10873+ int tlbstate = TLBSTATE_OK;
10874+#endif
10875
10876 if (likely(prev != next)) {
10877 #ifdef CONFIG_SMP
10878+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10879+ tlbstate = percpu_read(cpu_tlbstate.state);
10880+#endif
10881 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10882 percpu_write(cpu_tlbstate.active_mm, next);
10883 #endif
10884 cpumask_set_cpu(cpu, mm_cpumask(next));
10885
10886 /* Re-load page tables */
10887+#ifdef CONFIG_PAX_PER_CPU_PGD
10888+ pax_open_kernel();
10889+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
10890+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
10891+ pax_close_kernel();
10892+ load_cr3(get_cpu_pgd(cpu));
10893+#else
10894 load_cr3(next->pgd);
10895+#endif
10896
10897 /* stop flush ipis for the previous mm */
10898 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10899@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10900 */
10901 if (unlikely(prev->context.ldt != next->context.ldt))
10902 load_LDT_nolock(&next->context);
10903- }
10904+
10905+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10906+ if (!(__supported_pte_mask & _PAGE_NX)) {
10907+ smp_mb__before_clear_bit();
10908+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10909+ smp_mb__after_clear_bit();
10910+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10911+ }
10912+#endif
10913+
10914+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10915+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10916+ prev->context.user_cs_limit != next->context.user_cs_limit))
10917+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10918 #ifdef CONFIG_SMP
10919+ else if (unlikely(tlbstate != TLBSTATE_OK))
10920+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10921+#endif
10922+#endif
10923+
10924+ }
10925 else {
10926+
10927+#ifdef CONFIG_PAX_PER_CPU_PGD
10928+ pax_open_kernel();
10929+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
10930+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
10931+ pax_close_kernel();
10932+ load_cr3(get_cpu_pgd(cpu));
10933+#endif
10934+
10935+#ifdef CONFIG_SMP
10936 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10937 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10938
10939@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10940 * tlb flush IPI delivery. We must reload CR3
10941 * to make sure to use no freed page tables.
10942 */
10943+
10944+#ifndef CONFIG_PAX_PER_CPU_PGD
10945 load_cr3(next->pgd);
10946+#endif
10947+
10948 load_LDT_nolock(&next->context);
10949+
10950+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10951+ if (!(__supported_pte_mask & _PAGE_NX))
10952+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10953+#endif
10954+
10955+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10956+#ifdef CONFIG_PAX_PAGEEXEC
10957+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
10958+#endif
10959+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10960+#endif
10961+
10962 }
10963+#endif
10964 }
10965-#endif
10966 }
10967
10968 #define activate_mm(prev, next) \
10969diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10970index 9eae775..c914fea 100644
10971--- a/arch/x86/include/asm/module.h
10972+++ b/arch/x86/include/asm/module.h
10973@@ -5,6 +5,7 @@
10974
10975 #ifdef CONFIG_X86_64
10976 /* X86_64 does not define MODULE_PROC_FAMILY */
10977+#define MODULE_PROC_FAMILY ""
10978 #elif defined CONFIG_M386
10979 #define MODULE_PROC_FAMILY "386 "
10980 #elif defined CONFIG_M486
10981@@ -59,8 +60,20 @@
10982 #error unknown processor family
10983 #endif
10984
10985-#ifdef CONFIG_X86_32
10986-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
10987+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10988+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10989+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10990+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10991+#else
10992+#define MODULE_PAX_KERNEXEC ""
10993 #endif
10994
10995+#ifdef CONFIG_PAX_MEMORY_UDEREF
10996+#define MODULE_PAX_UDEREF "UDEREF "
10997+#else
10998+#define MODULE_PAX_UDEREF ""
10999+#endif
11000+
11001+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
11002+
11003 #endif /* _ASM_X86_MODULE_H */
11004diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
11005index 7639dbf..e08a58c 100644
11006--- a/arch/x86/include/asm/page_64_types.h
11007+++ b/arch/x86/include/asm/page_64_types.h
11008@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
11009
11010 /* duplicated to the one in bootmem.h */
11011 extern unsigned long max_pfn;
11012-extern unsigned long phys_base;
11013+extern const unsigned long phys_base;
11014
11015 extern unsigned long __phys_addr(unsigned long);
11016 #define __phys_reloc_hide(x) (x)
11017diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
11018index aa0f913..0c5bc6a 100644
11019--- a/arch/x86/include/asm/paravirt.h
11020+++ b/arch/x86/include/asm/paravirt.h
11021@@ -668,6 +668,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
11022 val);
11023 }
11024
11025+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11026+{
11027+ pgdval_t val = native_pgd_val(pgd);
11028+
11029+ if (sizeof(pgdval_t) > sizeof(long))
11030+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
11031+ val, (u64)val >> 32);
11032+ else
11033+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
11034+ val);
11035+}
11036+
11037 static inline void pgd_clear(pgd_t *pgdp)
11038 {
11039 set_pgd(pgdp, __pgd(0));
11040@@ -749,6 +761,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
11041 pv_mmu_ops.set_fixmap(idx, phys, flags);
11042 }
11043
11044+#ifdef CONFIG_PAX_KERNEXEC
11045+static inline unsigned long pax_open_kernel(void)
11046+{
11047+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
11048+}
11049+
11050+static inline unsigned long pax_close_kernel(void)
11051+{
11052+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
11053+}
11054+#else
11055+static inline unsigned long pax_open_kernel(void) { return 0; }
11056+static inline unsigned long pax_close_kernel(void) { return 0; }
11057+#endif
11058+
11059 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
11060
11061 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
11062@@ -965,7 +992,7 @@ extern void default_banner(void);
11063
11064 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11065 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11066-#define PARA_INDIRECT(addr) *%cs:addr
11067+#define PARA_INDIRECT(addr) *%ss:addr
11068 #endif
11069
11070 #define INTERRUPT_RETURN \
11071@@ -1042,6 +1069,21 @@ extern void default_banner(void);
11072 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11073 CLBR_NONE, \
11074 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11075+
11076+#define GET_CR0_INTO_RDI \
11077+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11078+ mov %rax,%rdi
11079+
11080+#define SET_RDI_INTO_CR0 \
11081+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11082+
11083+#define GET_CR3_INTO_RDI \
11084+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11085+ mov %rax,%rdi
11086+
11087+#define SET_RDI_INTO_CR3 \
11088+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11089+
11090 #endif /* CONFIG_X86_32 */
11091
11092 #endif /* __ASSEMBLY__ */
11093diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11094index 8e8b9a4..f07d725 100644
11095--- a/arch/x86/include/asm/paravirt_types.h
11096+++ b/arch/x86/include/asm/paravirt_types.h
11097@@ -84,20 +84,20 @@ struct pv_init_ops {
11098 */
11099 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11100 unsigned long addr, unsigned len);
11101-};
11102+} __no_const;
11103
11104
11105 struct pv_lazy_ops {
11106 /* Set deferred update mode, used for batching operations. */
11107 void (*enter)(void);
11108 void (*leave)(void);
11109-};
11110+} __no_const;
11111
11112 struct pv_time_ops {
11113 unsigned long long (*sched_clock)(void);
11114 unsigned long long (*steal_clock)(int cpu);
11115 unsigned long (*get_tsc_khz)(void);
11116-};
11117+} __no_const;
11118
11119 struct pv_cpu_ops {
11120 /* hooks for various privileged instructions */
11121@@ -193,7 +193,7 @@ struct pv_cpu_ops {
11122
11123 void (*start_context_switch)(struct task_struct *prev);
11124 void (*end_context_switch)(struct task_struct *next);
11125-};
11126+} __no_const;
11127
11128 struct pv_irq_ops {
11129 /*
11130@@ -224,7 +224,7 @@ struct pv_apic_ops {
11131 unsigned long start_eip,
11132 unsigned long start_esp);
11133 #endif
11134-};
11135+} __no_const;
11136
11137 struct pv_mmu_ops {
11138 unsigned long (*read_cr2)(void);
11139@@ -313,6 +313,7 @@ struct pv_mmu_ops {
11140 struct paravirt_callee_save make_pud;
11141
11142 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11143+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11144 #endif /* PAGETABLE_LEVELS == 4 */
11145 #endif /* PAGETABLE_LEVELS >= 3 */
11146
11147@@ -324,6 +325,12 @@ struct pv_mmu_ops {
11148 an mfn. We can tell which is which from the index. */
11149 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11150 phys_addr_t phys, pgprot_t flags);
11151+
11152+#ifdef CONFIG_PAX_KERNEXEC
11153+ unsigned long (*pax_open_kernel)(void);
11154+ unsigned long (*pax_close_kernel)(void);
11155+#endif
11156+
11157 };
11158
11159 struct arch_spinlock;
11160@@ -334,7 +341,7 @@ struct pv_lock_ops {
11161 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
11162 int (*spin_trylock)(struct arch_spinlock *lock);
11163 void (*spin_unlock)(struct arch_spinlock *lock);
11164-};
11165+} __no_const;
11166
11167 /* This contains all the paravirt structures: we get a convenient
11168 * number for each function using the offset which we use to indicate
11169diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
11170index b4389a4..7024269 100644
11171--- a/arch/x86/include/asm/pgalloc.h
11172+++ b/arch/x86/include/asm/pgalloc.h
11173@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
11174 pmd_t *pmd, pte_t *pte)
11175 {
11176 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11177+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
11178+}
11179+
11180+static inline void pmd_populate_user(struct mm_struct *mm,
11181+ pmd_t *pmd, pte_t *pte)
11182+{
11183+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11184 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
11185 }
11186
11187@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
11188
11189 #ifdef CONFIG_X86_PAE
11190 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
11191+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
11192+{
11193+ pud_populate(mm, pudp, pmd);
11194+}
11195 #else /* !CONFIG_X86_PAE */
11196 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11197 {
11198 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11199 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
11200 }
11201+
11202+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11203+{
11204+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11205+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
11206+}
11207 #endif /* CONFIG_X86_PAE */
11208
11209 #if PAGETABLE_LEVELS > 3
11210@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11211 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
11212 }
11213
11214+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11215+{
11216+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
11217+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
11218+}
11219+
11220 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
11221 {
11222 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
11223diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11224index 98391db..8f6984e 100644
11225--- a/arch/x86/include/asm/pgtable-2level.h
11226+++ b/arch/x86/include/asm/pgtable-2level.h
11227@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
11228
11229 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11230 {
11231+ pax_open_kernel();
11232 *pmdp = pmd;
11233+ pax_close_kernel();
11234 }
11235
11236 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11237diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
11238index effff47..bbb8295 100644
11239--- a/arch/x86/include/asm/pgtable-3level.h
11240+++ b/arch/x86/include/asm/pgtable-3level.h
11241@@ -31,6 +31,56 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
11242 ptep->pte_low = pte.pte_low;
11243 }
11244
11245+#define __HAVE_ARCH_READ_PMD_ATOMIC
11246+/*
11247+ * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
11248+ * a "*pmdp" dereference done by gcc. Problem is, in certain places
11249+ * where pte_offset_map_lock is called, concurrent page faults are
11250+ * allowed, if the mmap_sem is hold for reading. An example is mincore
11251+ * vs page faults vs MADV_DONTNEED. On the page fault side
11252+ * pmd_populate rightfully does a set_64bit, but if we're reading the
11253+ * pmd_t with a "*pmdp" on the mincore side, a SMP race can happen
11254+ * because gcc will not read the 64bit of the pmd atomically. To fix
11255+ * this all places running pmd_offset_map_lock() while holding the
11256+ * mmap_sem in read mode, shall read the pmdp pointer using this
11257+ * function to know if the pmd is null nor not, and in turn to know if
11258+ * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
11259+ * operations.
11260+ *
11261+ * Without THP if the mmap_sem is hold for reading, the
11262+ * pmd can only transition from null to not null while read_pmd_atomic runs.
11263+ * So there's no need of literally reading it atomically.
11264+ *
11265+ * With THP if the mmap_sem is hold for reading, the pmd can become
11266+ * THP or null or point to a pte (and in turn become "stable") at any
11267+ * time under read_pmd_atomic, so it's mandatory to read it atomically
11268+ * with cmpxchg8b.
11269+ */
11270+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
11271+static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
11272+{
11273+ pmdval_t ret;
11274+ u32 *tmp = (u32 *)pmdp;
11275+
11276+ ret = (pmdval_t) (*tmp);
11277+ if (ret) {
11278+ /*
11279+ * If the low part is null, we must not read the high part
11280+ * or we can end up with a partial pmd.
11281+ */
11282+ smp_rmb();
11283+ ret |= ((pmdval_t)*(tmp + 1)) << 32;
11284+ }
11285+
11286+ return __pmd(ret);
11287+}
11288+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
11289+static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
11290+{
11291+ return __pmd(atomic64_read((atomic64_t *)pmdp));
11292+}
11293+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
11294+
11295 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11296 {
11297 set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
11298@@ -38,12 +88,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11299
11300 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11301 {
11302+ pax_open_kernel();
11303 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
11304+ pax_close_kernel();
11305 }
11306
11307 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11308 {
11309+ pax_open_kernel();
11310 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
11311+ pax_close_kernel();
11312 }
11313
11314 /*
11315diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
11316index 49afb3f..91a8c63 100644
11317--- a/arch/x86/include/asm/pgtable.h
11318+++ b/arch/x86/include/asm/pgtable.h
11319@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11320
11321 #ifndef __PAGETABLE_PUD_FOLDED
11322 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11323+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11324 #define pgd_clear(pgd) native_pgd_clear(pgd)
11325 #endif
11326
11327@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11328
11329 #define arch_end_context_switch(prev) do {} while(0)
11330
11331+#define pax_open_kernel() native_pax_open_kernel()
11332+#define pax_close_kernel() native_pax_close_kernel()
11333 #endif /* CONFIG_PARAVIRT */
11334
11335+#define __HAVE_ARCH_PAX_OPEN_KERNEL
11336+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
11337+
11338+#ifdef CONFIG_PAX_KERNEXEC
11339+static inline unsigned long native_pax_open_kernel(void)
11340+{
11341+ unsigned long cr0;
11342+
11343+ preempt_disable();
11344+ barrier();
11345+ cr0 = read_cr0() ^ X86_CR0_WP;
11346+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
11347+ write_cr0(cr0);
11348+ return cr0 ^ X86_CR0_WP;
11349+}
11350+
11351+static inline unsigned long native_pax_close_kernel(void)
11352+{
11353+ unsigned long cr0;
11354+
11355+ cr0 = read_cr0() ^ X86_CR0_WP;
11356+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11357+ write_cr0(cr0);
11358+ barrier();
11359+ preempt_enable_no_resched();
11360+ return cr0 ^ X86_CR0_WP;
11361+}
11362+#else
11363+static inline unsigned long native_pax_open_kernel(void) { return 0; }
11364+static inline unsigned long native_pax_close_kernel(void) { return 0; }
11365+#endif
11366+
11367 /*
11368 * The following only work if pte_present() is true.
11369 * Undefined behaviour if not..
11370 */
11371+static inline int pte_user(pte_t pte)
11372+{
11373+ return pte_val(pte) & _PAGE_USER;
11374+}
11375+
11376 static inline int pte_dirty(pte_t pte)
11377 {
11378 return pte_flags(pte) & _PAGE_DIRTY;
11379@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11380 return pte_clear_flags(pte, _PAGE_RW);
11381 }
11382
11383+static inline pte_t pte_mkread(pte_t pte)
11384+{
11385+ return __pte(pte_val(pte) | _PAGE_USER);
11386+}
11387+
11388 static inline pte_t pte_mkexec(pte_t pte)
11389 {
11390- return pte_clear_flags(pte, _PAGE_NX);
11391+#ifdef CONFIG_X86_PAE
11392+ if (__supported_pte_mask & _PAGE_NX)
11393+ return pte_clear_flags(pte, _PAGE_NX);
11394+ else
11395+#endif
11396+ return pte_set_flags(pte, _PAGE_USER);
11397+}
11398+
11399+static inline pte_t pte_exprotect(pte_t pte)
11400+{
11401+#ifdef CONFIG_X86_PAE
11402+ if (__supported_pte_mask & _PAGE_NX)
11403+ return pte_set_flags(pte, _PAGE_NX);
11404+ else
11405+#endif
11406+ return pte_clear_flags(pte, _PAGE_USER);
11407 }
11408
11409 static inline pte_t pte_mkdirty(pte_t pte)
11410@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11411 #endif
11412
11413 #ifndef __ASSEMBLY__
11414+
11415+#ifdef CONFIG_PAX_PER_CPU_PGD
11416+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11417+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11418+{
11419+ return cpu_pgd[cpu];
11420+}
11421+#endif
11422+
11423 #include <linux/mm_types.h>
11424
11425 static inline int pte_none(pte_t pte)
11426@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11427
11428 static inline int pgd_bad(pgd_t pgd)
11429 {
11430- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11431+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11432 }
11433
11434 static inline int pgd_none(pgd_t pgd)
11435@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
11436 * pgd_offset() returns a (pgd_t *)
11437 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11438 */
11439-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11440+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11441+
11442+#ifdef CONFIG_PAX_PER_CPU_PGD
11443+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11444+#endif
11445+
11446 /*
11447 * a shortcut which implies the use of the kernel's pgd, instead
11448 * of a process's
11449@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
11450 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11451 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11452
11453+#ifdef CONFIG_X86_32
11454+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11455+#else
11456+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11457+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11458+
11459+#ifdef CONFIG_PAX_MEMORY_UDEREF
11460+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11461+#else
11462+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11463+#endif
11464+
11465+#endif
11466+
11467 #ifndef __ASSEMBLY__
11468
11469 extern int direct_gbpages;
11470@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
11471 * dst and src can be on the same page, but the range must not overlap,
11472 * and must not cross a page boundary.
11473 */
11474-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11475+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11476 {
11477- memcpy(dst, src, count * sizeof(pgd_t));
11478+ pax_open_kernel();
11479+ while (count--)
11480+ *dst++ = *src++;
11481+ pax_close_kernel();
11482 }
11483
11484+#ifdef CONFIG_PAX_PER_CPU_PGD
11485+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
11486+#endif
11487+
11488+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11489+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
11490+#else
11491+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
11492+#endif
11493
11494 #include <asm-generic/pgtable.h>
11495 #endif /* __ASSEMBLY__ */
11496diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11497index 0c92113..34a77c6 100644
11498--- a/arch/x86/include/asm/pgtable_32.h
11499+++ b/arch/x86/include/asm/pgtable_32.h
11500@@ -25,9 +25,6 @@
11501 struct mm_struct;
11502 struct vm_area_struct;
11503
11504-extern pgd_t swapper_pg_dir[1024];
11505-extern pgd_t initial_page_table[1024];
11506-
11507 static inline void pgtable_cache_init(void) { }
11508 static inline void check_pgt_cache(void) { }
11509 void paging_init(void);
11510@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11511 # include <asm/pgtable-2level.h>
11512 #endif
11513
11514+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11515+extern pgd_t initial_page_table[PTRS_PER_PGD];
11516+#ifdef CONFIG_X86_PAE
11517+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11518+#endif
11519+
11520 #if defined(CONFIG_HIGHPTE)
11521 #define pte_offset_map(dir, address) \
11522 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
11523@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11524 /* Clear a kernel PTE and flush it from the TLB */
11525 #define kpte_clear_flush(ptep, vaddr) \
11526 do { \
11527+ pax_open_kernel(); \
11528 pte_clear(&init_mm, (vaddr), (ptep)); \
11529+ pax_close_kernel(); \
11530 __flush_tlb_one((vaddr)); \
11531 } while (0)
11532
11533@@ -74,6 +79,9 @@ do { \
11534
11535 #endif /* !__ASSEMBLY__ */
11536
11537+#define HAVE_ARCH_UNMAPPED_AREA
11538+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11539+
11540 /*
11541 * kern_addr_valid() is (1) for FLATMEM and (0) for
11542 * SPARSEMEM and DISCONTIGMEM
11543diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11544index ed5903b..c7fe163 100644
11545--- a/arch/x86/include/asm/pgtable_32_types.h
11546+++ b/arch/x86/include/asm/pgtable_32_types.h
11547@@ -8,7 +8,7 @@
11548 */
11549 #ifdef CONFIG_X86_PAE
11550 # include <asm/pgtable-3level_types.h>
11551-# define PMD_SIZE (1UL << PMD_SHIFT)
11552+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11553 # define PMD_MASK (~(PMD_SIZE - 1))
11554 #else
11555 # include <asm/pgtable-2level_types.h>
11556@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11557 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11558 #endif
11559
11560+#ifdef CONFIG_PAX_KERNEXEC
11561+#ifndef __ASSEMBLY__
11562+extern unsigned char MODULES_EXEC_VADDR[];
11563+extern unsigned char MODULES_EXEC_END[];
11564+#endif
11565+#include <asm/boot.h>
11566+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11567+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11568+#else
11569+#define ktla_ktva(addr) (addr)
11570+#define ktva_ktla(addr) (addr)
11571+#endif
11572+
11573 #define MODULES_VADDR VMALLOC_START
11574 #define MODULES_END VMALLOC_END
11575 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11576diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11577index 975f709..9f779c9 100644
11578--- a/arch/x86/include/asm/pgtable_64.h
11579+++ b/arch/x86/include/asm/pgtable_64.h
11580@@ -16,10 +16,14 @@
11581
11582 extern pud_t level3_kernel_pgt[512];
11583 extern pud_t level3_ident_pgt[512];
11584+extern pud_t level3_vmalloc_start_pgt[512];
11585+extern pud_t level3_vmalloc_end_pgt[512];
11586+extern pud_t level3_vmemmap_pgt[512];
11587+extern pud_t level2_vmemmap_pgt[512];
11588 extern pmd_t level2_kernel_pgt[512];
11589 extern pmd_t level2_fixmap_pgt[512];
11590-extern pmd_t level2_ident_pgt[512];
11591-extern pgd_t init_level4_pgt[];
11592+extern pmd_t level2_ident_pgt[512*2];
11593+extern pgd_t init_level4_pgt[512];
11594
11595 #define swapper_pg_dir init_level4_pgt
11596
11597@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11598
11599 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11600 {
11601+ pax_open_kernel();
11602 *pmdp = pmd;
11603+ pax_close_kernel();
11604 }
11605
11606 static inline void native_pmd_clear(pmd_t *pmd)
11607@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
11608
11609 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11610 {
11611+ pax_open_kernel();
11612 *pudp = pud;
11613+ pax_close_kernel();
11614 }
11615
11616 static inline void native_pud_clear(pud_t *pud)
11617@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
11618
11619 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11620 {
11621+ pax_open_kernel();
11622+ *pgdp = pgd;
11623+ pax_close_kernel();
11624+}
11625+
11626+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11627+{
11628 *pgdp = pgd;
11629 }
11630
11631diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11632index 766ea16..5b96cb3 100644
11633--- a/arch/x86/include/asm/pgtable_64_types.h
11634+++ b/arch/x86/include/asm/pgtable_64_types.h
11635@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11636 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11637 #define MODULES_END _AC(0xffffffffff000000, UL)
11638 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11639+#define MODULES_EXEC_VADDR MODULES_VADDR
11640+#define MODULES_EXEC_END MODULES_END
11641+
11642+#define ktla_ktva(addr) (addr)
11643+#define ktva_ktla(addr) (addr)
11644
11645 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11646diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11647index 013286a..8b42f4f 100644
11648--- a/arch/x86/include/asm/pgtable_types.h
11649+++ b/arch/x86/include/asm/pgtable_types.h
11650@@ -16,13 +16,12 @@
11651 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11652 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11653 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11654-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11655+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11656 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11657 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11658 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11659-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11660-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11661-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
11662+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11663+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
11664 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11665
11666 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11667@@ -40,7 +39,6 @@
11668 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11669 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11670 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11671-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11672 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11673 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11674 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11675@@ -57,8 +55,10 @@
11676
11677 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11678 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11679-#else
11680+#elif defined(CONFIG_KMEMCHECK)
11681 #define _PAGE_NX (_AT(pteval_t, 0))
11682+#else
11683+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11684 #endif
11685
11686 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11687@@ -96,6 +96,9 @@
11688 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11689 _PAGE_ACCESSED)
11690
11691+#define PAGE_READONLY_NOEXEC PAGE_READONLY
11692+#define PAGE_SHARED_NOEXEC PAGE_SHARED
11693+
11694 #define __PAGE_KERNEL_EXEC \
11695 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11696 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11697@@ -106,7 +109,7 @@
11698 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11699 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11700 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11701-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11702+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11703 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11704 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
11705 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11706@@ -168,8 +171,8 @@
11707 * bits are combined, this will alow user to access the high address mapped
11708 * VDSO in the presence of CONFIG_COMPAT_VDSO
11709 */
11710-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11711-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11712+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11713+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11714 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11715 #endif
11716
11717@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11718 {
11719 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11720 }
11721+#endif
11722
11723+#if PAGETABLE_LEVELS == 3
11724+#include <asm-generic/pgtable-nopud.h>
11725+#endif
11726+
11727+#if PAGETABLE_LEVELS == 2
11728+#include <asm-generic/pgtable-nopmd.h>
11729+#endif
11730+
11731+#ifndef __ASSEMBLY__
11732 #if PAGETABLE_LEVELS > 3
11733 typedef struct { pudval_t pud; } pud_t;
11734
11735@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11736 return pud.pud;
11737 }
11738 #else
11739-#include <asm-generic/pgtable-nopud.h>
11740-
11741 static inline pudval_t native_pud_val(pud_t pud)
11742 {
11743 return native_pgd_val(pud.pgd);
11744@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11745 return pmd.pmd;
11746 }
11747 #else
11748-#include <asm-generic/pgtable-nopmd.h>
11749-
11750 static inline pmdval_t native_pmd_val(pmd_t pmd)
11751 {
11752 return native_pgd_val(pmd.pud.pgd);
11753@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
11754
11755 extern pteval_t __supported_pte_mask;
11756 extern void set_nx(void);
11757-extern int nx_enabled;
11758
11759 #define pgprot_writecombine pgprot_writecombine
11760 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11761diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11762index 4fa7dcc..764e33a 100644
11763--- a/arch/x86/include/asm/processor.h
11764+++ b/arch/x86/include/asm/processor.h
11765@@ -276,7 +276,7 @@ struct tss_struct {
11766
11767 } ____cacheline_aligned;
11768
11769-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11770+extern struct tss_struct init_tss[NR_CPUS];
11771
11772 /*
11773 * Save the original ist values for checking stack pointers during debugging
11774@@ -807,11 +807,18 @@ static inline void spin_lock_prefetch(const void *x)
11775 */
11776 #define TASK_SIZE PAGE_OFFSET
11777 #define TASK_SIZE_MAX TASK_SIZE
11778+
11779+#ifdef CONFIG_PAX_SEGMEXEC
11780+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11781+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11782+#else
11783 #define STACK_TOP TASK_SIZE
11784-#define STACK_TOP_MAX STACK_TOP
11785+#endif
11786+
11787+#define STACK_TOP_MAX TASK_SIZE
11788
11789 #define INIT_THREAD { \
11790- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11791+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11792 .vm86_info = NULL, \
11793 .sysenter_cs = __KERNEL_CS, \
11794 .io_bitmap_ptr = NULL, \
11795@@ -825,7 +832,7 @@ static inline void spin_lock_prefetch(const void *x)
11796 */
11797 #define INIT_TSS { \
11798 .x86_tss = { \
11799- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11800+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11801 .ss0 = __KERNEL_DS, \
11802 .ss1 = __KERNEL_CS, \
11803 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11804@@ -836,11 +843,7 @@ static inline void spin_lock_prefetch(const void *x)
11805 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11806
11807 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11808-#define KSTK_TOP(info) \
11809-({ \
11810- unsigned long *__ptr = (unsigned long *)(info); \
11811- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11812-})
11813+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11814
11815 /*
11816 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11817@@ -855,7 +858,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11818 #define task_pt_regs(task) \
11819 ({ \
11820 struct pt_regs *__regs__; \
11821- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11822+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11823 __regs__ - 1; \
11824 })
11825
11826@@ -865,13 +868,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11827 /*
11828 * User space process size. 47bits minus one guard page.
11829 */
11830-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11831+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11832
11833 /* This decides where the kernel will search for a free chunk of vm
11834 * space during mmap's.
11835 */
11836 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11837- 0xc0000000 : 0xFFFFe000)
11838+ 0xc0000000 : 0xFFFFf000)
11839
11840 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
11841 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11842@@ -882,11 +885,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11843 #define STACK_TOP_MAX TASK_SIZE_MAX
11844
11845 #define INIT_THREAD { \
11846- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11847+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11848 }
11849
11850 #define INIT_TSS { \
11851- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11852+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11853 }
11854
11855 /*
11856@@ -914,6 +917,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11857 */
11858 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11859
11860+#ifdef CONFIG_PAX_SEGMEXEC
11861+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11862+#endif
11863+
11864 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11865
11866 /* Get/set a process' ability to use the timestamp counter instruction */
11867@@ -976,12 +983,12 @@ extern bool cpu_has_amd_erratum(const int *);
11868
11869 void cpu_idle_wait(void);
11870
11871-extern unsigned long arch_align_stack(unsigned long sp);
11872+#define arch_align_stack(x) ((x) & ~0xfUL)
11873 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11874
11875 void default_idle(void);
11876 bool set_pm_idle_to_default(void);
11877
11878-void stop_this_cpu(void *dummy);
11879+void stop_this_cpu(void *dummy) __noreturn;
11880
11881 #endif /* _ASM_X86_PROCESSOR_H */
11882diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11883index dcfde52..dbfea06 100644
11884--- a/arch/x86/include/asm/ptrace.h
11885+++ b/arch/x86/include/asm/ptrace.h
11886@@ -155,28 +155,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11887 }
11888
11889 /*
11890- * user_mode_vm(regs) determines whether a register set came from user mode.
11891+ * user_mode(regs) determines whether a register set came from user mode.
11892 * This is true if V8086 mode was enabled OR if the register set was from
11893 * protected mode with RPL-3 CS value. This tricky test checks that with
11894 * one comparison. Many places in the kernel can bypass this full check
11895- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11896+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11897+ * be used.
11898 */
11899-static inline int user_mode(struct pt_regs *regs)
11900+static inline int user_mode_novm(struct pt_regs *regs)
11901 {
11902 #ifdef CONFIG_X86_32
11903 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11904 #else
11905- return !!(regs->cs & 3);
11906+ return !!(regs->cs & SEGMENT_RPL_MASK);
11907 #endif
11908 }
11909
11910-static inline int user_mode_vm(struct pt_regs *regs)
11911+static inline int user_mode(struct pt_regs *regs)
11912 {
11913 #ifdef CONFIG_X86_32
11914 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11915 USER_RPL;
11916 #else
11917- return user_mode(regs);
11918+ return user_mode_novm(regs);
11919 #endif
11920 }
11921
11922@@ -192,15 +193,16 @@ static inline int v8086_mode(struct pt_regs *regs)
11923 #ifdef CONFIG_X86_64
11924 static inline bool user_64bit_mode(struct pt_regs *regs)
11925 {
11926+ unsigned long cs = regs->cs & 0xffff;
11927 #ifndef CONFIG_PARAVIRT
11928 /*
11929 * On non-paravirt systems, this is the only long mode CPL 3
11930 * selector. We do not allow long mode selectors in the LDT.
11931 */
11932- return regs->cs == __USER_CS;
11933+ return cs == __USER_CS;
11934 #else
11935 /* Headers are too twisted for this to go in paravirt.h. */
11936- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
11937+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
11938 #endif
11939 }
11940 #endif
11941diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11942index 92f29706..a79cbbb 100644
11943--- a/arch/x86/include/asm/reboot.h
11944+++ b/arch/x86/include/asm/reboot.h
11945@@ -6,19 +6,19 @@
11946 struct pt_regs;
11947
11948 struct machine_ops {
11949- void (*restart)(char *cmd);
11950- void (*halt)(void);
11951- void (*power_off)(void);
11952+ void (* __noreturn restart)(char *cmd);
11953+ void (* __noreturn halt)(void);
11954+ void (* __noreturn power_off)(void);
11955 void (*shutdown)(void);
11956 void (*crash_shutdown)(struct pt_regs *);
11957- void (*emergency_restart)(void);
11958-};
11959+ void (* __noreturn emergency_restart)(void);
11960+} __no_const;
11961
11962 extern struct machine_ops machine_ops;
11963
11964 void native_machine_crash_shutdown(struct pt_regs *regs);
11965 void native_machine_shutdown(void);
11966-void machine_real_restart(unsigned int type);
11967+void machine_real_restart(unsigned int type) __noreturn;
11968 /* These must match dispatch_table in reboot_32.S */
11969 #define MRR_BIOS 0
11970 #define MRR_APM 1
11971diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11972index 2dbe4a7..ce1db00 100644
11973--- a/arch/x86/include/asm/rwsem.h
11974+++ b/arch/x86/include/asm/rwsem.h
11975@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11976 {
11977 asm volatile("# beginning down_read\n\t"
11978 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11979+
11980+#ifdef CONFIG_PAX_REFCOUNT
11981+ "jno 0f\n"
11982+ LOCK_PREFIX _ASM_DEC "(%1)\n"
11983+ "int $4\n0:\n"
11984+ _ASM_EXTABLE(0b, 0b)
11985+#endif
11986+
11987 /* adds 0x00000001 */
11988 " jns 1f\n"
11989 " call call_rwsem_down_read_failed\n"
11990@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11991 "1:\n\t"
11992 " mov %1,%2\n\t"
11993 " add %3,%2\n\t"
11994+
11995+#ifdef CONFIG_PAX_REFCOUNT
11996+ "jno 0f\n"
11997+ "sub %3,%2\n"
11998+ "int $4\n0:\n"
11999+ _ASM_EXTABLE(0b, 0b)
12000+#endif
12001+
12002 " jle 2f\n\t"
12003 LOCK_PREFIX " cmpxchg %2,%0\n\t"
12004 " jnz 1b\n\t"
12005@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
12006 long tmp;
12007 asm volatile("# beginning down_write\n\t"
12008 LOCK_PREFIX " xadd %1,(%2)\n\t"
12009+
12010+#ifdef CONFIG_PAX_REFCOUNT
12011+ "jno 0f\n"
12012+ "mov %1,(%2)\n"
12013+ "int $4\n0:\n"
12014+ _ASM_EXTABLE(0b, 0b)
12015+#endif
12016+
12017 /* adds 0xffff0001, returns the old value */
12018 " test %1,%1\n\t"
12019 /* was the count 0 before? */
12020@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
12021 long tmp;
12022 asm volatile("# beginning __up_read\n\t"
12023 LOCK_PREFIX " xadd %1,(%2)\n\t"
12024+
12025+#ifdef CONFIG_PAX_REFCOUNT
12026+ "jno 0f\n"
12027+ "mov %1,(%2)\n"
12028+ "int $4\n0:\n"
12029+ _ASM_EXTABLE(0b, 0b)
12030+#endif
12031+
12032 /* subtracts 1, returns the old value */
12033 " jns 1f\n\t"
12034 " call call_rwsem_wake\n" /* expects old value in %edx */
12035@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
12036 long tmp;
12037 asm volatile("# beginning __up_write\n\t"
12038 LOCK_PREFIX " xadd %1,(%2)\n\t"
12039+
12040+#ifdef CONFIG_PAX_REFCOUNT
12041+ "jno 0f\n"
12042+ "mov %1,(%2)\n"
12043+ "int $4\n0:\n"
12044+ _ASM_EXTABLE(0b, 0b)
12045+#endif
12046+
12047 /* subtracts 0xffff0001, returns the old value */
12048 " jns 1f\n\t"
12049 " call call_rwsem_wake\n" /* expects old value in %edx */
12050@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12051 {
12052 asm volatile("# beginning __downgrade_write\n\t"
12053 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
12054+
12055+#ifdef CONFIG_PAX_REFCOUNT
12056+ "jno 0f\n"
12057+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
12058+ "int $4\n0:\n"
12059+ _ASM_EXTABLE(0b, 0b)
12060+#endif
12061+
12062 /*
12063 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
12064 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
12065@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12066 */
12067 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12068 {
12069- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
12070+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
12071+
12072+#ifdef CONFIG_PAX_REFCOUNT
12073+ "jno 0f\n"
12074+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
12075+ "int $4\n0:\n"
12076+ _ASM_EXTABLE(0b, 0b)
12077+#endif
12078+
12079 : "+m" (sem->count)
12080 : "er" (delta));
12081 }
12082@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12083 */
12084 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
12085 {
12086- return delta + xadd(&sem->count, delta);
12087+ return delta + xadd_check_overflow(&sem->count, delta);
12088 }
12089
12090 #endif /* __KERNEL__ */
12091diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
12092index 1654662..5af4157 100644
12093--- a/arch/x86/include/asm/segment.h
12094+++ b/arch/x86/include/asm/segment.h
12095@@ -64,10 +64,15 @@
12096 * 26 - ESPFIX small SS
12097 * 27 - per-cpu [ offset to per-cpu data area ]
12098 * 28 - stack_canary-20 [ for stack protector ]
12099- * 29 - unused
12100- * 30 - unused
12101+ * 29 - PCI BIOS CS
12102+ * 30 - PCI BIOS DS
12103 * 31 - TSS for double fault handler
12104 */
12105+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
12106+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
12107+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
12108+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
12109+
12110 #define GDT_ENTRY_TLS_MIN 6
12111 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
12112
12113@@ -79,6 +84,8 @@
12114
12115 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
12116
12117+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
12118+
12119 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
12120
12121 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
12122@@ -104,6 +111,12 @@
12123 #define __KERNEL_STACK_CANARY 0
12124 #endif
12125
12126+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
12127+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
12128+
12129+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
12130+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
12131+
12132 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
12133
12134 /*
12135@@ -141,7 +154,7 @@
12136 */
12137
12138 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12139-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12140+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12141
12142
12143 #else
12144@@ -165,6 +178,8 @@
12145 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
12146 #define __USER32_DS __USER_DS
12147
12148+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12149+
12150 #define GDT_ENTRY_TSS 8 /* needs two entries */
12151 #define GDT_ENTRY_LDT 10 /* needs two entries */
12152 #define GDT_ENTRY_TLS_MIN 12
12153@@ -185,6 +200,7 @@
12154 #endif
12155
12156 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
12157+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
12158 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
12159 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
12160 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
12161@@ -263,7 +279,7 @@ static inline unsigned long get_limit(unsigned long segment)
12162 {
12163 unsigned long __limit;
12164 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12165- return __limit + 1;
12166+ return __limit;
12167 }
12168
12169 #endif /* !__ASSEMBLY__ */
12170diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12171index 0434c40..1714bf0 100644
12172--- a/arch/x86/include/asm/smp.h
12173+++ b/arch/x86/include/asm/smp.h
12174@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12175 /* cpus sharing the last level cache: */
12176 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
12177 DECLARE_PER_CPU(u16, cpu_llc_id);
12178-DECLARE_PER_CPU(int, cpu_number);
12179+DECLARE_PER_CPU(unsigned int, cpu_number);
12180
12181 static inline struct cpumask *cpu_sibling_mask(int cpu)
12182 {
12183@@ -77,7 +77,7 @@ struct smp_ops {
12184
12185 void (*send_call_func_ipi)(const struct cpumask *mask);
12186 void (*send_call_func_single_ipi)(int cpu);
12187-};
12188+} __no_const;
12189
12190 /* Globals due to paravirt */
12191 extern void set_cpu_sibling_map(int cpu);
12192@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12193 extern int safe_smp_processor_id(void);
12194
12195 #elif defined(CONFIG_X86_64_SMP)
12196-#define raw_smp_processor_id() (percpu_read(cpu_number))
12197-
12198-#define stack_smp_processor_id() \
12199-({ \
12200- struct thread_info *ti; \
12201- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12202- ti->cpu; \
12203-})
12204+#define raw_smp_processor_id() (percpu_read(cpu_number))
12205+#define stack_smp_processor_id() raw_smp_processor_id()
12206 #define safe_smp_processor_id() smp_processor_id()
12207
12208 #endif
12209diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12210index 76bfa2c..12d3fe7 100644
12211--- a/arch/x86/include/asm/spinlock.h
12212+++ b/arch/x86/include/asm/spinlock.h
12213@@ -175,6 +175,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
12214 static inline void arch_read_lock(arch_rwlock_t *rw)
12215 {
12216 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
12217+
12218+#ifdef CONFIG_PAX_REFCOUNT
12219+ "jno 0f\n"
12220+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
12221+ "int $4\n0:\n"
12222+ _ASM_EXTABLE(0b, 0b)
12223+#endif
12224+
12225 "jns 1f\n"
12226 "call __read_lock_failed\n\t"
12227 "1:\n"
12228@@ -184,6 +192,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
12229 static inline void arch_write_lock(arch_rwlock_t *rw)
12230 {
12231 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
12232+
12233+#ifdef CONFIG_PAX_REFCOUNT
12234+ "jno 0f\n"
12235+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
12236+ "int $4\n0:\n"
12237+ _ASM_EXTABLE(0b, 0b)
12238+#endif
12239+
12240 "jz 1f\n"
12241 "call __write_lock_failed\n\t"
12242 "1:\n"
12243@@ -213,13 +229,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
12244
12245 static inline void arch_read_unlock(arch_rwlock_t *rw)
12246 {
12247- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
12248+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
12249+
12250+#ifdef CONFIG_PAX_REFCOUNT
12251+ "jno 0f\n"
12252+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
12253+ "int $4\n0:\n"
12254+ _ASM_EXTABLE(0b, 0b)
12255+#endif
12256+
12257 :"+m" (rw->lock) : : "memory");
12258 }
12259
12260 static inline void arch_write_unlock(arch_rwlock_t *rw)
12261 {
12262- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
12263+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
12264+
12265+#ifdef CONFIG_PAX_REFCOUNT
12266+ "jno 0f\n"
12267+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
12268+ "int $4\n0:\n"
12269+ _ASM_EXTABLE(0b, 0b)
12270+#endif
12271+
12272 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
12273 }
12274
12275diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
12276index b5d9533..41655fa 100644
12277--- a/arch/x86/include/asm/stackprotector.h
12278+++ b/arch/x86/include/asm/stackprotector.h
12279@@ -47,7 +47,7 @@
12280 * head_32 for boot CPU and setup_per_cpu_areas() for others.
12281 */
12282 #define GDT_STACK_CANARY_INIT \
12283- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
12284+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
12285
12286 /*
12287 * Initialize the stackprotector canary value.
12288@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
12289
12290 static inline void load_stack_canary_segment(void)
12291 {
12292-#ifdef CONFIG_X86_32
12293+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12294 asm volatile ("mov %0, %%gs" : : "r" (0));
12295 #endif
12296 }
12297diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
12298index 70bbe39..4ae2bd4 100644
12299--- a/arch/x86/include/asm/stacktrace.h
12300+++ b/arch/x86/include/asm/stacktrace.h
12301@@ -11,28 +11,20 @@
12302
12303 extern int kstack_depth_to_print;
12304
12305-struct thread_info;
12306+struct task_struct;
12307 struct stacktrace_ops;
12308
12309-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
12310- unsigned long *stack,
12311- unsigned long bp,
12312- const struct stacktrace_ops *ops,
12313- void *data,
12314- unsigned long *end,
12315- int *graph);
12316+typedef unsigned long walk_stack_t(struct task_struct *task,
12317+ void *stack_start,
12318+ unsigned long *stack,
12319+ unsigned long bp,
12320+ const struct stacktrace_ops *ops,
12321+ void *data,
12322+ unsigned long *end,
12323+ int *graph);
12324
12325-extern unsigned long
12326-print_context_stack(struct thread_info *tinfo,
12327- unsigned long *stack, unsigned long bp,
12328- const struct stacktrace_ops *ops, void *data,
12329- unsigned long *end, int *graph);
12330-
12331-extern unsigned long
12332-print_context_stack_bp(struct thread_info *tinfo,
12333- unsigned long *stack, unsigned long bp,
12334- const struct stacktrace_ops *ops, void *data,
12335- unsigned long *end, int *graph);
12336+extern walk_stack_t print_context_stack;
12337+extern walk_stack_t print_context_stack_bp;
12338
12339 /* Generic stack tracer with callbacks */
12340
12341@@ -40,7 +32,7 @@ struct stacktrace_ops {
12342 void (*address)(void *data, unsigned long address, int reliable);
12343 /* On negative return stop dumping */
12344 int (*stack)(void *data, char *name);
12345- walk_stack_t walk_stack;
12346+ walk_stack_t *walk_stack;
12347 };
12348
12349 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
12350diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
12351index 4ec45b3..a4f0a8a 100644
12352--- a/arch/x86/include/asm/switch_to.h
12353+++ b/arch/x86/include/asm/switch_to.h
12354@@ -108,7 +108,7 @@ do { \
12355 "call __switch_to\n\t" \
12356 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12357 __switch_canary \
12358- "movq %P[thread_info](%%rsi),%%r8\n\t" \
12359+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12360 "movq %%rax,%%rdi\n\t" \
12361 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12362 "jnz ret_from_fork\n\t" \
12363@@ -119,7 +119,7 @@ do { \
12364 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12365 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12366 [_tif_fork] "i" (_TIF_FORK), \
12367- [thread_info] "i" (offsetof(struct task_struct, stack)), \
12368+ [thread_info] "m" (current_tinfo), \
12369 [current_task] "m" (current_task) \
12370 __switch_canary_iparam \
12371 : "memory", "cc" __EXTRA_CLOBBER)
12372diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
12373index 3fda9db4..4ca1c61 100644
12374--- a/arch/x86/include/asm/sys_ia32.h
12375+++ b/arch/x86/include/asm/sys_ia32.h
12376@@ -40,7 +40,7 @@ asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *,
12377 struct old_sigaction32 __user *);
12378 asmlinkage long sys32_alarm(unsigned int);
12379
12380-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
12381+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
12382 asmlinkage long sys32_sysfs(int, u32, u32);
12383
12384 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
12385diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
12386index ad6df8c..5e0cf6e 100644
12387--- a/arch/x86/include/asm/thread_info.h
12388+++ b/arch/x86/include/asm/thread_info.h
12389@@ -10,6 +10,7 @@
12390 #include <linux/compiler.h>
12391 #include <asm/page.h>
12392 #include <asm/types.h>
12393+#include <asm/percpu.h>
12394
12395 /*
12396 * low level task data that entry.S needs immediate access to
12397@@ -24,7 +25,6 @@ struct exec_domain;
12398 #include <linux/atomic.h>
12399
12400 struct thread_info {
12401- struct task_struct *task; /* main task structure */
12402 struct exec_domain *exec_domain; /* execution domain */
12403 __u32 flags; /* low level flags */
12404 __u32 status; /* thread synchronous flags */
12405@@ -34,19 +34,13 @@ struct thread_info {
12406 mm_segment_t addr_limit;
12407 struct restart_block restart_block;
12408 void __user *sysenter_return;
12409-#ifdef CONFIG_X86_32
12410- unsigned long previous_esp; /* ESP of the previous stack in
12411- case of nested (IRQ) stacks
12412- */
12413- __u8 supervisor_stack[0];
12414-#endif
12415+ unsigned long lowest_stack;
12416 unsigned int sig_on_uaccess_error:1;
12417 unsigned int uaccess_err:1; /* uaccess failed */
12418 };
12419
12420-#define INIT_THREAD_INFO(tsk) \
12421+#define INIT_THREAD_INFO \
12422 { \
12423- .task = &tsk, \
12424 .exec_domain = &default_exec_domain, \
12425 .flags = 0, \
12426 .cpu = 0, \
12427@@ -57,7 +51,7 @@ struct thread_info {
12428 }, \
12429 }
12430
12431-#define init_thread_info (init_thread_union.thread_info)
12432+#define init_thread_info (init_thread_union.stack)
12433 #define init_stack (init_thread_union.stack)
12434
12435 #else /* !__ASSEMBLY__ */
12436@@ -97,6 +91,7 @@ struct thread_info {
12437 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
12438 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
12439 #define TIF_X32 30 /* 32-bit native x86-64 binary */
12440+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
12441
12442 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
12443 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
12444@@ -120,16 +115,18 @@ struct thread_info {
12445 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
12446 #define _TIF_ADDR32 (1 << TIF_ADDR32)
12447 #define _TIF_X32 (1 << TIF_X32)
12448+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
12449
12450 /* work to do in syscall_trace_enter() */
12451 #define _TIF_WORK_SYSCALL_ENTRY \
12452 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
12453- _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
12454+ _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
12455+ _TIF_GRSEC_SETXID)
12456
12457 /* work to do in syscall_trace_leave() */
12458 #define _TIF_WORK_SYSCALL_EXIT \
12459 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
12460- _TIF_SYSCALL_TRACEPOINT)
12461+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
12462
12463 /* work to do on interrupt/exception return */
12464 #define _TIF_WORK_MASK \
12465@@ -139,7 +136,8 @@ struct thread_info {
12466
12467 /* work to do on any return to user space */
12468 #define _TIF_ALLWORK_MASK \
12469- ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
12470+ ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
12471+ _TIF_GRSEC_SETXID)
12472
12473 /* Only used for 64 bit */
12474 #define _TIF_DO_NOTIFY_MASK \
12475@@ -173,45 +171,40 @@ struct thread_info {
12476 ret; \
12477 })
12478
12479-#ifdef CONFIG_X86_32
12480-
12481-#define STACK_WARN (THREAD_SIZE/8)
12482-/*
12483- * macros/functions for gaining access to the thread information structure
12484- *
12485- * preempt_count needs to be 1 initially, until the scheduler is functional.
12486- */
12487-#ifndef __ASSEMBLY__
12488-
12489-
12490-/* how to get the current stack pointer from C */
12491-register unsigned long current_stack_pointer asm("esp") __used;
12492-
12493-/* how to get the thread information struct from C */
12494-static inline struct thread_info *current_thread_info(void)
12495-{
12496- return (struct thread_info *)
12497- (current_stack_pointer & ~(THREAD_SIZE - 1));
12498-}
12499-
12500-#else /* !__ASSEMBLY__ */
12501-
12502+#ifdef __ASSEMBLY__
12503 /* how to get the thread information struct from ASM */
12504 #define GET_THREAD_INFO(reg) \
12505- movl $-THREAD_SIZE, reg; \
12506- andl %esp, reg
12507+ mov PER_CPU_VAR(current_tinfo), reg
12508
12509 /* use this one if reg already contains %esp */
12510-#define GET_THREAD_INFO_WITH_ESP(reg) \
12511- andl $-THREAD_SIZE, reg
12512+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12513+#else
12514+/* how to get the thread information struct from C */
12515+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12516+
12517+static __always_inline struct thread_info *current_thread_info(void)
12518+{
12519+ return percpu_read_stable(current_tinfo);
12520+}
12521+#endif
12522+
12523+#ifdef CONFIG_X86_32
12524+
12525+#define STACK_WARN (THREAD_SIZE/8)
12526+/*
12527+ * macros/functions for gaining access to the thread information structure
12528+ *
12529+ * preempt_count needs to be 1 initially, until the scheduler is functional.
12530+ */
12531+#ifndef __ASSEMBLY__
12532+
12533+/* how to get the current stack pointer from C */
12534+register unsigned long current_stack_pointer asm("esp") __used;
12535
12536 #endif
12537
12538 #else /* X86_32 */
12539
12540-#include <asm/percpu.h>
12541-#define KERNEL_STACK_OFFSET (5*8)
12542-
12543 /*
12544 * macros/functions for gaining access to the thread information structure
12545 * preempt_count needs to be 1 initially, until the scheduler is functional.
12546@@ -219,27 +212,8 @@ static inline struct thread_info *current_thread_info(void)
12547 #ifndef __ASSEMBLY__
12548 DECLARE_PER_CPU(unsigned long, kernel_stack);
12549
12550-static inline struct thread_info *current_thread_info(void)
12551-{
12552- struct thread_info *ti;
12553- ti = (void *)(percpu_read_stable(kernel_stack) +
12554- KERNEL_STACK_OFFSET - THREAD_SIZE);
12555- return ti;
12556-}
12557-
12558-#else /* !__ASSEMBLY__ */
12559-
12560-/* how to get the thread information struct from ASM */
12561-#define GET_THREAD_INFO(reg) \
12562- movq PER_CPU_VAR(kernel_stack),reg ; \
12563- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12564-
12565-/*
12566- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
12567- * a certain register (to be used in assembler memory operands).
12568- */
12569-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
12570-
12571+/* how to get the current stack pointer from C */
12572+register unsigned long current_stack_pointer asm("rsp") __used;
12573 #endif
12574
12575 #endif /* !X86_32 */
12576@@ -285,5 +259,16 @@ extern void arch_task_cache_init(void);
12577 extern void free_thread_info(struct thread_info *ti);
12578 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12579 #define arch_task_cache_init arch_task_cache_init
12580+
12581+#define __HAVE_THREAD_FUNCTIONS
12582+#define task_thread_info(task) (&(task)->tinfo)
12583+#define task_stack_page(task) ((task)->stack)
12584+#define setup_thread_stack(p, org) do {} while (0)
12585+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12586+
12587+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12588+extern struct task_struct *alloc_task_struct_node(int node);
12589+extern void free_task_struct(struct task_struct *);
12590+
12591 #endif
12592 #endif /* _ASM_X86_THREAD_INFO_H */
12593diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12594index e054459..14bc8a7 100644
12595--- a/arch/x86/include/asm/uaccess.h
12596+++ b/arch/x86/include/asm/uaccess.h
12597@@ -7,12 +7,15 @@
12598 #include <linux/compiler.h>
12599 #include <linux/thread_info.h>
12600 #include <linux/string.h>
12601+#include <linux/sched.h>
12602 #include <asm/asm.h>
12603 #include <asm/page.h>
12604
12605 #define VERIFY_READ 0
12606 #define VERIFY_WRITE 1
12607
12608+extern void check_object_size(const void *ptr, unsigned long n, bool to);
12609+
12610 /*
12611 * The fs value determines whether argument validity checking should be
12612 * performed or not. If get_fs() == USER_DS, checking is performed, with
12613@@ -28,7 +31,12 @@
12614
12615 #define get_ds() (KERNEL_DS)
12616 #define get_fs() (current_thread_info()->addr_limit)
12617+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12618+void __set_fs(mm_segment_t x);
12619+void set_fs(mm_segment_t x);
12620+#else
12621 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12622+#endif
12623
12624 #define segment_eq(a, b) ((a).seg == (b).seg)
12625
12626@@ -76,7 +84,33 @@
12627 * checks that the pointer is in the user space range - after calling
12628 * this function, memory access functions may still return -EFAULT.
12629 */
12630-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12631+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12632+#define access_ok(type, addr, size) \
12633+({ \
12634+ long __size = size; \
12635+ unsigned long __addr = (unsigned long)addr; \
12636+ unsigned long __addr_ao = __addr & PAGE_MASK; \
12637+ unsigned long __end_ao = __addr + __size - 1; \
12638+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12639+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12640+ while(__addr_ao <= __end_ao) { \
12641+ char __c_ao; \
12642+ __addr_ao += PAGE_SIZE; \
12643+ if (__size > PAGE_SIZE) \
12644+ cond_resched(); \
12645+ if (__get_user(__c_ao, (char __user *)__addr)) \
12646+ break; \
12647+ if (type != VERIFY_WRITE) { \
12648+ __addr = __addr_ao; \
12649+ continue; \
12650+ } \
12651+ if (__put_user(__c_ao, (char __user *)__addr)) \
12652+ break; \
12653+ __addr = __addr_ao; \
12654+ } \
12655+ } \
12656+ __ret_ao; \
12657+})
12658
12659 /*
12660 * The exception table consists of pairs of addresses: the first is the
12661@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
12662 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12663 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12664
12665-
12666+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12667+#define __copyuser_seg "gs;"
12668+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12669+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12670+#else
12671+#define __copyuser_seg
12672+#define __COPYUSER_SET_ES
12673+#define __COPYUSER_RESTORE_ES
12674+#endif
12675
12676 #ifdef CONFIG_X86_32
12677 #define __put_user_asm_u64(x, addr, err, errret) \
12678- asm volatile("1: movl %%eax,0(%2)\n" \
12679- "2: movl %%edx,4(%2)\n" \
12680+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12681+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12682 "3:\n" \
12683 ".section .fixup,\"ax\"\n" \
12684 "4: movl %3,%0\n" \
12685@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
12686 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12687
12688 #define __put_user_asm_ex_u64(x, addr) \
12689- asm volatile("1: movl %%eax,0(%1)\n" \
12690- "2: movl %%edx,4(%1)\n" \
12691+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12692+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12693 "3:\n" \
12694 _ASM_EXTABLE(1b, 2b - 1b) \
12695 _ASM_EXTABLE(2b, 3b - 2b) \
12696@@ -252,7 +294,7 @@ extern void __put_user_8(void);
12697 __typeof__(*(ptr)) __pu_val; \
12698 __chk_user_ptr(ptr); \
12699 might_fault(); \
12700- __pu_val = x; \
12701+ __pu_val = (x); \
12702 switch (sizeof(*(ptr))) { \
12703 case 1: \
12704 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12705@@ -373,7 +415,7 @@ do { \
12706 } while (0)
12707
12708 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12709- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12710+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12711 "2:\n" \
12712 ".section .fixup,\"ax\"\n" \
12713 "3: mov %3,%0\n" \
12714@@ -381,7 +423,7 @@ do { \
12715 " jmp 2b\n" \
12716 ".previous\n" \
12717 _ASM_EXTABLE(1b, 3b) \
12718- : "=r" (err), ltype(x) \
12719+ : "=r" (err), ltype (x) \
12720 : "m" (__m(addr)), "i" (errret), "0" (err))
12721
12722 #define __get_user_size_ex(x, ptr, size) \
12723@@ -406,7 +448,7 @@ do { \
12724 } while (0)
12725
12726 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12727- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12728+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12729 "2:\n" \
12730 _ASM_EXTABLE(1b, 2b - 1b) \
12731 : ltype(x) : "m" (__m(addr)))
12732@@ -423,13 +465,24 @@ do { \
12733 int __gu_err; \
12734 unsigned long __gu_val; \
12735 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12736- (x) = (__force __typeof__(*(ptr)))__gu_val; \
12737+ (x) = (__typeof__(*(ptr)))__gu_val; \
12738 __gu_err; \
12739 })
12740
12741 /* FIXME: this hack is definitely wrong -AK */
12742 struct __large_struct { unsigned long buf[100]; };
12743-#define __m(x) (*(struct __large_struct __user *)(x))
12744+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12745+#define ____m(x) \
12746+({ \
12747+ unsigned long ____x = (unsigned long)(x); \
12748+ if (____x < PAX_USER_SHADOW_BASE) \
12749+ ____x += PAX_USER_SHADOW_BASE; \
12750+ (void __user *)____x; \
12751+})
12752+#else
12753+#define ____m(x) (x)
12754+#endif
12755+#define __m(x) (*(struct __large_struct __user *)____m(x))
12756
12757 /*
12758 * Tell gcc we read from memory instead of writing: this is because
12759@@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
12760 * aliasing issues.
12761 */
12762 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12763- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12764+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12765 "2:\n" \
12766 ".section .fixup,\"ax\"\n" \
12767 "3: mov %3,%0\n" \
12768@@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
12769 ".previous\n" \
12770 _ASM_EXTABLE(1b, 3b) \
12771 : "=r"(err) \
12772- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12773+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12774
12775 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12776- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12777+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12778 "2:\n" \
12779 _ASM_EXTABLE(1b, 2b - 1b) \
12780 : : ltype(x), "m" (__m(addr)))
12781@@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
12782 * On error, the variable @x is set to zero.
12783 */
12784
12785+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12786+#define __get_user(x, ptr) get_user((x), (ptr))
12787+#else
12788 #define __get_user(x, ptr) \
12789 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12790+#endif
12791
12792 /**
12793 * __put_user: - Write a simple value into user space, with less checking.
12794@@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
12795 * Returns zero on success, or -EFAULT on error.
12796 */
12797
12798+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12799+#define __put_user(x, ptr) put_user((x), (ptr))
12800+#else
12801 #define __put_user(x, ptr) \
12802 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12803+#endif
12804
12805 #define __get_user_unaligned __get_user
12806 #define __put_user_unaligned __put_user
12807@@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
12808 #define get_user_ex(x, ptr) do { \
12809 unsigned long __gue_val; \
12810 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12811- (x) = (__force __typeof__(*(ptr)))__gue_val; \
12812+ (x) = (__typeof__(*(ptr)))__gue_val; \
12813 } while (0)
12814
12815 #ifdef CONFIG_X86_WP_WORKS_OK
12816diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12817index 8084bc7..cc139cb 100644
12818--- a/arch/x86/include/asm/uaccess_32.h
12819+++ b/arch/x86/include/asm/uaccess_32.h
12820@@ -11,15 +11,15 @@
12821 #include <asm/page.h>
12822
12823 unsigned long __must_check __copy_to_user_ll
12824- (void __user *to, const void *from, unsigned long n);
12825+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
12826 unsigned long __must_check __copy_from_user_ll
12827- (void *to, const void __user *from, unsigned long n);
12828+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12829 unsigned long __must_check __copy_from_user_ll_nozero
12830- (void *to, const void __user *from, unsigned long n);
12831+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12832 unsigned long __must_check __copy_from_user_ll_nocache
12833- (void *to, const void __user *from, unsigned long n);
12834+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12835 unsigned long __must_check __copy_from_user_ll_nocache_nozero
12836- (void *to, const void __user *from, unsigned long n);
12837+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12838
12839 /**
12840 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
12841@@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12842 static __always_inline unsigned long __must_check
12843 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12844 {
12845+ if ((long)n < 0)
12846+ return n;
12847+
12848 if (__builtin_constant_p(n)) {
12849 unsigned long ret;
12850
12851@@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12852 return ret;
12853 }
12854 }
12855+ if (!__builtin_constant_p(n))
12856+ check_object_size(from, n, true);
12857 return __copy_to_user_ll(to, from, n);
12858 }
12859
12860@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
12861 __copy_to_user(void __user *to, const void *from, unsigned long n)
12862 {
12863 might_fault();
12864+
12865 return __copy_to_user_inatomic(to, from, n);
12866 }
12867
12868 static __always_inline unsigned long
12869 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12870 {
12871+ if ((long)n < 0)
12872+ return n;
12873+
12874 /* Avoid zeroing the tail if the copy fails..
12875 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12876 * but as the zeroing behaviour is only significant when n is not
12877@@ -137,6 +146,10 @@ static __always_inline unsigned long
12878 __copy_from_user(void *to, const void __user *from, unsigned long n)
12879 {
12880 might_fault();
12881+
12882+ if ((long)n < 0)
12883+ return n;
12884+
12885 if (__builtin_constant_p(n)) {
12886 unsigned long ret;
12887
12888@@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12889 return ret;
12890 }
12891 }
12892+ if (!__builtin_constant_p(n))
12893+ check_object_size(to, n, false);
12894 return __copy_from_user_ll(to, from, n);
12895 }
12896
12897@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12898 const void __user *from, unsigned long n)
12899 {
12900 might_fault();
12901+
12902+ if ((long)n < 0)
12903+ return n;
12904+
12905 if (__builtin_constant_p(n)) {
12906 unsigned long ret;
12907
12908@@ -181,15 +200,19 @@ static __always_inline unsigned long
12909 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12910 unsigned long n)
12911 {
12912- return __copy_from_user_ll_nocache_nozero(to, from, n);
12913+ if ((long)n < 0)
12914+ return n;
12915+
12916+ return __copy_from_user_ll_nocache_nozero(to, from, n);
12917 }
12918
12919-unsigned long __must_check copy_to_user(void __user *to,
12920- const void *from, unsigned long n);
12921-unsigned long __must_check _copy_from_user(void *to,
12922- const void __user *from,
12923- unsigned long n);
12924-
12925+extern void copy_to_user_overflow(void)
12926+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12927+ __compiletime_error("copy_to_user() buffer size is not provably correct")
12928+#else
12929+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
12930+#endif
12931+;
12932
12933 extern void copy_from_user_overflow(void)
12934 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12935@@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
12936 #endif
12937 ;
12938
12939-static inline unsigned long __must_check copy_from_user(void *to,
12940- const void __user *from,
12941- unsigned long n)
12942+/**
12943+ * copy_to_user: - Copy a block of data into user space.
12944+ * @to: Destination address, in user space.
12945+ * @from: Source address, in kernel space.
12946+ * @n: Number of bytes to copy.
12947+ *
12948+ * Context: User context only. This function may sleep.
12949+ *
12950+ * Copy data from kernel space to user space.
12951+ *
12952+ * Returns number of bytes that could not be copied.
12953+ * On success, this will be zero.
12954+ */
12955+static inline unsigned long __must_check
12956+copy_to_user(void __user *to, const void *from, unsigned long n)
12957+{
12958+ int sz = __compiletime_object_size(from);
12959+
12960+ if (unlikely(sz != -1 && sz < n))
12961+ copy_to_user_overflow();
12962+ else if (access_ok(VERIFY_WRITE, to, n))
12963+ n = __copy_to_user(to, from, n);
12964+ return n;
12965+}
12966+
12967+/**
12968+ * copy_from_user: - Copy a block of data from user space.
12969+ * @to: Destination address, in kernel space.
12970+ * @from: Source address, in user space.
12971+ * @n: Number of bytes to copy.
12972+ *
12973+ * Context: User context only. This function may sleep.
12974+ *
12975+ * Copy data from user space to kernel space.
12976+ *
12977+ * Returns number of bytes that could not be copied.
12978+ * On success, this will be zero.
12979+ *
12980+ * If some data could not be copied, this function will pad the copied
12981+ * data to the requested size using zero bytes.
12982+ */
12983+static inline unsigned long __must_check
12984+copy_from_user(void *to, const void __user *from, unsigned long n)
12985 {
12986 int sz = __compiletime_object_size(to);
12987
12988- if (likely(sz == -1 || sz >= n))
12989- n = _copy_from_user(to, from, n);
12990- else
12991+ if (unlikely(sz != -1 && sz < n))
12992 copy_from_user_overflow();
12993-
12994+ else if (access_ok(VERIFY_READ, from, n))
12995+ n = __copy_from_user(to, from, n);
12996+ else if ((long)n > 0) {
12997+ if (!__builtin_constant_p(n))
12998+ check_object_size(to, n, false);
12999+ memset(to, 0, n);
13000+ }
13001 return n;
13002 }
13003
13004@@ -230,7 +297,7 @@ static inline unsigned long __must_check copy_from_user(void *to,
13005 #define strlen_user(str) strnlen_user(str, LONG_MAX)
13006
13007 long strnlen_user(const char __user *str, long n);
13008-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
13009-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
13010+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13011+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13012
13013 #endif /* _ASM_X86_UACCESS_32_H */
13014diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
13015index fcd4b6f..1d52af4 100644
13016--- a/arch/x86/include/asm/uaccess_64.h
13017+++ b/arch/x86/include/asm/uaccess_64.h
13018@@ -10,6 +10,9 @@
13019 #include <asm/alternative.h>
13020 #include <asm/cpufeature.h>
13021 #include <asm/page.h>
13022+#include <asm/pgtable.h>
13023+
13024+#define set_fs(x) (current_thread_info()->addr_limit = (x))
13025
13026 /*
13027 * Copy To/From Userspace
13028@@ -17,12 +20,14 @@
13029
13030 /* Handles exceptions in both to and from, but doesn't do access_ok */
13031 __must_check unsigned long
13032-copy_user_generic_string(void *to, const void *from, unsigned len);
13033+copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3);
13034 __must_check unsigned long
13035-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
13036+copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3);
13037
13038 static __always_inline __must_check unsigned long
13039-copy_user_generic(void *to, const void *from, unsigned len)
13040+copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3);
13041+static __always_inline __must_check unsigned long
13042+copy_user_generic(void *to, const void *from, unsigned long len)
13043 {
13044 unsigned ret;
13045
13046@@ -32,142 +37,238 @@ copy_user_generic(void *to, const void *from, unsigned len)
13047 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
13048 "=d" (len)),
13049 "1" (to), "2" (from), "3" (len)
13050- : "memory", "rcx", "r8", "r9", "r10", "r11");
13051+ : "memory", "rcx", "r8", "r9", "r11");
13052 return ret;
13053 }
13054
13055+static __always_inline __must_check unsigned long
13056+__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
13057+static __always_inline __must_check unsigned long
13058+__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
13059 __must_check unsigned long
13060-_copy_to_user(void __user *to, const void *from, unsigned len);
13061-__must_check unsigned long
13062-_copy_from_user(void *to, const void __user *from, unsigned len);
13063-__must_check unsigned long
13064-copy_in_user(void __user *to, const void __user *from, unsigned len);
13065+copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
13066+
13067+extern void copy_to_user_overflow(void)
13068+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13069+ __compiletime_error("copy_to_user() buffer size is not provably correct")
13070+#else
13071+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
13072+#endif
13073+;
13074+
13075+extern void copy_from_user_overflow(void)
13076+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13077+ __compiletime_error("copy_from_user() buffer size is not provably correct")
13078+#else
13079+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
13080+#endif
13081+;
13082
13083 static inline unsigned long __must_check copy_from_user(void *to,
13084 const void __user *from,
13085 unsigned long n)
13086 {
13087- int sz = __compiletime_object_size(to);
13088-
13089 might_fault();
13090- if (likely(sz == -1 || sz >= n))
13091- n = _copy_from_user(to, from, n);
13092-#ifdef CONFIG_DEBUG_VM
13093- else
13094- WARN(1, "Buffer overflow detected!\n");
13095-#endif
13096+
13097+ if (access_ok(VERIFY_READ, from, n))
13098+ n = __copy_from_user(to, from, n);
13099+ else if (n < INT_MAX) {
13100+ if (!__builtin_constant_p(n))
13101+ check_object_size(to, n, false);
13102+ memset(to, 0, n);
13103+ }
13104 return n;
13105 }
13106
13107 static __always_inline __must_check
13108-int copy_to_user(void __user *dst, const void *src, unsigned size)
13109+int copy_to_user(void __user *dst, const void *src, unsigned long size)
13110 {
13111 might_fault();
13112
13113- return _copy_to_user(dst, src, size);
13114+ if (access_ok(VERIFY_WRITE, dst, size))
13115+ size = __copy_to_user(dst, src, size);
13116+ return size;
13117 }
13118
13119 static __always_inline __must_check
13120-int __copy_from_user(void *dst, const void __user *src, unsigned size)
13121+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
13122 {
13123- int ret = 0;
13124+ int sz = __compiletime_object_size(dst);
13125+ unsigned ret = 0;
13126
13127 might_fault();
13128- if (!__builtin_constant_p(size))
13129- return copy_user_generic(dst, (__force void *)src, size);
13130+
13131+ if (size > INT_MAX)
13132+ return size;
13133+
13134+#ifdef CONFIG_PAX_MEMORY_UDEREF
13135+ if (!__access_ok(VERIFY_READ, src, size))
13136+ return size;
13137+#endif
13138+
13139+ if (unlikely(sz != -1 && sz < size)) {
13140+ copy_from_user_overflow();
13141+ return size;
13142+ }
13143+
13144+ if (!__builtin_constant_p(size)) {
13145+ check_object_size(dst, size, false);
13146+
13147+#ifdef CONFIG_PAX_MEMORY_UDEREF
13148+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13149+ src += PAX_USER_SHADOW_BASE;
13150+#endif
13151+
13152+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13153+ }
13154 switch (size) {
13155- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
13156+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
13157 ret, "b", "b", "=q", 1);
13158 return ret;
13159- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
13160+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
13161 ret, "w", "w", "=r", 2);
13162 return ret;
13163- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
13164+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
13165 ret, "l", "k", "=r", 4);
13166 return ret;
13167- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
13168+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13169 ret, "q", "", "=r", 8);
13170 return ret;
13171 case 10:
13172- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13173+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13174 ret, "q", "", "=r", 10);
13175 if (unlikely(ret))
13176 return ret;
13177 __get_user_asm(*(u16 *)(8 + (char *)dst),
13178- (u16 __user *)(8 + (char __user *)src),
13179+ (const u16 __user *)(8 + (const char __user *)src),
13180 ret, "w", "w", "=r", 2);
13181 return ret;
13182 case 16:
13183- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13184+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13185 ret, "q", "", "=r", 16);
13186 if (unlikely(ret))
13187 return ret;
13188 __get_user_asm(*(u64 *)(8 + (char *)dst),
13189- (u64 __user *)(8 + (char __user *)src),
13190+ (const u64 __user *)(8 + (const char __user *)src),
13191 ret, "q", "", "=r", 8);
13192 return ret;
13193 default:
13194- return copy_user_generic(dst, (__force void *)src, size);
13195+
13196+#ifdef CONFIG_PAX_MEMORY_UDEREF
13197+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13198+ src += PAX_USER_SHADOW_BASE;
13199+#endif
13200+
13201+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13202 }
13203 }
13204
13205 static __always_inline __must_check
13206-int __copy_to_user(void __user *dst, const void *src, unsigned size)
13207+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
13208 {
13209- int ret = 0;
13210+ int sz = __compiletime_object_size(src);
13211+ unsigned ret = 0;
13212
13213 might_fault();
13214- if (!__builtin_constant_p(size))
13215- return copy_user_generic((__force void *)dst, src, size);
13216+
13217+ if (size > INT_MAX)
13218+ return size;
13219+
13220+#ifdef CONFIG_PAX_MEMORY_UDEREF
13221+ if (!__access_ok(VERIFY_WRITE, dst, size))
13222+ return size;
13223+#endif
13224+
13225+ if (unlikely(sz != -1 && sz < size)) {
13226+ copy_to_user_overflow();
13227+ return size;
13228+ }
13229+
13230+ if (!__builtin_constant_p(size)) {
13231+ check_object_size(src, size, true);
13232+
13233+#ifdef CONFIG_PAX_MEMORY_UDEREF
13234+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13235+ dst += PAX_USER_SHADOW_BASE;
13236+#endif
13237+
13238+ return copy_user_generic((__force_kernel void *)dst, src, size);
13239+ }
13240 switch (size) {
13241- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
13242+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
13243 ret, "b", "b", "iq", 1);
13244 return ret;
13245- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
13246+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
13247 ret, "w", "w", "ir", 2);
13248 return ret;
13249- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
13250+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
13251 ret, "l", "k", "ir", 4);
13252 return ret;
13253- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
13254+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13255 ret, "q", "", "er", 8);
13256 return ret;
13257 case 10:
13258- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13259+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13260 ret, "q", "", "er", 10);
13261 if (unlikely(ret))
13262 return ret;
13263 asm("":::"memory");
13264- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
13265+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
13266 ret, "w", "w", "ir", 2);
13267 return ret;
13268 case 16:
13269- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13270+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13271 ret, "q", "", "er", 16);
13272 if (unlikely(ret))
13273 return ret;
13274 asm("":::"memory");
13275- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
13276+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
13277 ret, "q", "", "er", 8);
13278 return ret;
13279 default:
13280- return copy_user_generic((__force void *)dst, src, size);
13281+
13282+#ifdef CONFIG_PAX_MEMORY_UDEREF
13283+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13284+ dst += PAX_USER_SHADOW_BASE;
13285+#endif
13286+
13287+ return copy_user_generic((__force_kernel void *)dst, src, size);
13288 }
13289 }
13290
13291 static __always_inline __must_check
13292-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13293+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
13294 {
13295- int ret = 0;
13296+ unsigned ret = 0;
13297
13298 might_fault();
13299- if (!__builtin_constant_p(size))
13300- return copy_user_generic((__force void *)dst,
13301- (__force void *)src, size);
13302+
13303+ if (size > INT_MAX)
13304+ return size;
13305+
13306+#ifdef CONFIG_PAX_MEMORY_UDEREF
13307+ if (!__access_ok(VERIFY_READ, src, size))
13308+ return size;
13309+ if (!__access_ok(VERIFY_WRITE, dst, size))
13310+ return size;
13311+#endif
13312+
13313+ if (!__builtin_constant_p(size)) {
13314+
13315+#ifdef CONFIG_PAX_MEMORY_UDEREF
13316+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13317+ src += PAX_USER_SHADOW_BASE;
13318+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13319+ dst += PAX_USER_SHADOW_BASE;
13320+#endif
13321+
13322+ return copy_user_generic((__force_kernel void *)dst,
13323+ (__force_kernel const void *)src, size);
13324+ }
13325 switch (size) {
13326 case 1: {
13327 u8 tmp;
13328- __get_user_asm(tmp, (u8 __user *)src,
13329+ __get_user_asm(tmp, (const u8 __user *)src,
13330 ret, "b", "b", "=q", 1);
13331 if (likely(!ret))
13332 __put_user_asm(tmp, (u8 __user *)dst,
13333@@ -176,7 +277,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13334 }
13335 case 2: {
13336 u16 tmp;
13337- __get_user_asm(tmp, (u16 __user *)src,
13338+ __get_user_asm(tmp, (const u16 __user *)src,
13339 ret, "w", "w", "=r", 2);
13340 if (likely(!ret))
13341 __put_user_asm(tmp, (u16 __user *)dst,
13342@@ -186,7 +287,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13343
13344 case 4: {
13345 u32 tmp;
13346- __get_user_asm(tmp, (u32 __user *)src,
13347+ __get_user_asm(tmp, (const u32 __user *)src,
13348 ret, "l", "k", "=r", 4);
13349 if (likely(!ret))
13350 __put_user_asm(tmp, (u32 __user *)dst,
13351@@ -195,7 +296,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13352 }
13353 case 8: {
13354 u64 tmp;
13355- __get_user_asm(tmp, (u64 __user *)src,
13356+ __get_user_asm(tmp, (const u64 __user *)src,
13357 ret, "q", "", "=r", 8);
13358 if (likely(!ret))
13359 __put_user_asm(tmp, (u64 __user *)dst,
13360@@ -203,47 +304,92 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13361 return ret;
13362 }
13363 default:
13364- return copy_user_generic((__force void *)dst,
13365- (__force void *)src, size);
13366+
13367+#ifdef CONFIG_PAX_MEMORY_UDEREF
13368+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13369+ src += PAX_USER_SHADOW_BASE;
13370+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13371+ dst += PAX_USER_SHADOW_BASE;
13372+#endif
13373+
13374+ return copy_user_generic((__force_kernel void *)dst,
13375+ (__force_kernel const void *)src, size);
13376 }
13377 }
13378
13379 __must_check long strnlen_user(const char __user *str, long n);
13380 __must_check long __strnlen_user(const char __user *str, long n);
13381 __must_check long strlen_user(const char __user *str);
13382-__must_check unsigned long clear_user(void __user *mem, unsigned long len);
13383-__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13384+__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13385+__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13386
13387 static __must_check __always_inline int
13388-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
13389+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
13390 {
13391- return copy_user_generic(dst, (__force const void *)src, size);
13392+ if (size > INT_MAX)
13393+ return size;
13394+
13395+#ifdef CONFIG_PAX_MEMORY_UDEREF
13396+ if (!__access_ok(VERIFY_READ, src, size))
13397+ return size;
13398+
13399+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13400+ src += PAX_USER_SHADOW_BASE;
13401+#endif
13402+
13403+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13404 }
13405
13406-static __must_check __always_inline int
13407-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
13408+static __must_check __always_inline unsigned long
13409+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
13410 {
13411- return copy_user_generic((__force void *)dst, src, size);
13412+ if (size > INT_MAX)
13413+ return size;
13414+
13415+#ifdef CONFIG_PAX_MEMORY_UDEREF
13416+ if (!__access_ok(VERIFY_WRITE, dst, size))
13417+ return size;
13418+
13419+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13420+ dst += PAX_USER_SHADOW_BASE;
13421+#endif
13422+
13423+ return copy_user_generic((__force_kernel void *)dst, src, size);
13424 }
13425
13426-extern long __copy_user_nocache(void *dst, const void __user *src,
13427- unsigned size, int zerorest);
13428+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
13429+ unsigned long size, int zerorest) __size_overflow(3);
13430
13431-static inline int
13432-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
13433+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13434 {
13435 might_sleep();
13436+
13437+ if (size > INT_MAX)
13438+ return size;
13439+
13440+#ifdef CONFIG_PAX_MEMORY_UDEREF
13441+ if (!__access_ok(VERIFY_READ, src, size))
13442+ return size;
13443+#endif
13444+
13445 return __copy_user_nocache(dst, src, size, 1);
13446 }
13447
13448-static inline int
13449-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13450- unsigned size)
13451+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13452+ unsigned long size)
13453 {
13454+ if (size > INT_MAX)
13455+ return size;
13456+
13457+#ifdef CONFIG_PAX_MEMORY_UDEREF
13458+ if (!__access_ok(VERIFY_READ, src, size))
13459+ return size;
13460+#endif
13461+
13462 return __copy_user_nocache(dst, src, size, 0);
13463 }
13464
13465-unsigned long
13466-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
13467+extern unsigned long
13468+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
13469
13470 #endif /* _ASM_X86_UACCESS_64_H */
13471diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13472index bb05228..d763d5b 100644
13473--- a/arch/x86/include/asm/vdso.h
13474+++ b/arch/x86/include/asm/vdso.h
13475@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
13476 #define VDSO32_SYMBOL(base, name) \
13477 ({ \
13478 extern const char VDSO32_##name[]; \
13479- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13480+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13481 })
13482 #endif
13483
13484diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13485index 764b66a..ad3cfc8 100644
13486--- a/arch/x86/include/asm/x86_init.h
13487+++ b/arch/x86/include/asm/x86_init.h
13488@@ -29,7 +29,7 @@ struct x86_init_mpparse {
13489 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13490 void (*find_smp_config)(void);
13491 void (*get_smp_config)(unsigned int early);
13492-};
13493+} __no_const;
13494
13495 /**
13496 * struct x86_init_resources - platform specific resource related ops
13497@@ -43,7 +43,7 @@ struct x86_init_resources {
13498 void (*probe_roms)(void);
13499 void (*reserve_resources)(void);
13500 char *(*memory_setup)(void);
13501-};
13502+} __no_const;
13503
13504 /**
13505 * struct x86_init_irqs - platform specific interrupt setup
13506@@ -56,7 +56,7 @@ struct x86_init_irqs {
13507 void (*pre_vector_init)(void);
13508 void (*intr_init)(void);
13509 void (*trap_init)(void);
13510-};
13511+} __no_const;
13512
13513 /**
13514 * struct x86_init_oem - oem platform specific customizing functions
13515@@ -66,7 +66,7 @@ struct x86_init_irqs {
13516 struct x86_init_oem {
13517 void (*arch_setup)(void);
13518 void (*banner)(void);
13519-};
13520+} __no_const;
13521
13522 /**
13523 * struct x86_init_mapping - platform specific initial kernel pagetable setup
13524@@ -77,7 +77,7 @@ struct x86_init_oem {
13525 */
13526 struct x86_init_mapping {
13527 void (*pagetable_reserve)(u64 start, u64 end);
13528-};
13529+} __no_const;
13530
13531 /**
13532 * struct x86_init_paging - platform specific paging functions
13533@@ -87,7 +87,7 @@ struct x86_init_mapping {
13534 struct x86_init_paging {
13535 void (*pagetable_setup_start)(pgd_t *base);
13536 void (*pagetable_setup_done)(pgd_t *base);
13537-};
13538+} __no_const;
13539
13540 /**
13541 * struct x86_init_timers - platform specific timer setup
13542@@ -102,7 +102,7 @@ struct x86_init_timers {
13543 void (*tsc_pre_init)(void);
13544 void (*timer_init)(void);
13545 void (*wallclock_init)(void);
13546-};
13547+} __no_const;
13548
13549 /**
13550 * struct x86_init_iommu - platform specific iommu setup
13551@@ -110,7 +110,7 @@ struct x86_init_timers {
13552 */
13553 struct x86_init_iommu {
13554 int (*iommu_init)(void);
13555-};
13556+} __no_const;
13557
13558 /**
13559 * struct x86_init_pci - platform specific pci init functions
13560@@ -124,7 +124,7 @@ struct x86_init_pci {
13561 int (*init)(void);
13562 void (*init_irq)(void);
13563 void (*fixup_irqs)(void);
13564-};
13565+} __no_const;
13566
13567 /**
13568 * struct x86_init_ops - functions for platform specific setup
13569@@ -140,7 +140,7 @@ struct x86_init_ops {
13570 struct x86_init_timers timers;
13571 struct x86_init_iommu iommu;
13572 struct x86_init_pci pci;
13573-};
13574+} __no_const;
13575
13576 /**
13577 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13578@@ -151,7 +151,7 @@ struct x86_cpuinit_ops {
13579 void (*setup_percpu_clockev)(void);
13580 void (*early_percpu_clock_init)(void);
13581 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
13582-};
13583+} __no_const;
13584
13585 /**
13586 * struct x86_platform_ops - platform specific runtime functions
13587@@ -177,7 +177,7 @@ struct x86_platform_ops {
13588 int (*i8042_detect)(void);
13589 void (*save_sched_clock_state)(void);
13590 void (*restore_sched_clock_state)(void);
13591-};
13592+} __no_const;
13593
13594 struct pci_dev;
13595
13596@@ -186,7 +186,7 @@ struct x86_msi_ops {
13597 void (*teardown_msi_irq)(unsigned int irq);
13598 void (*teardown_msi_irqs)(struct pci_dev *dev);
13599 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
13600-};
13601+} __no_const;
13602
13603 extern struct x86_init_ops x86_init;
13604 extern struct x86_cpuinit_ops x86_cpuinit;
13605diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13606index c6ce245..ffbdab7 100644
13607--- a/arch/x86/include/asm/xsave.h
13608+++ b/arch/x86/include/asm/xsave.h
13609@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13610 {
13611 int err;
13612
13613+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13614+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13615+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13616+#endif
13617+
13618 /*
13619 * Clear the xsave header first, so that reserved fields are
13620 * initialized to zero.
13621@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13622 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13623 {
13624 int err;
13625- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13626+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13627 u32 lmask = mask;
13628 u32 hmask = mask >> 32;
13629
13630+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13631+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13632+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13633+#endif
13634+
13635 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13636 "2:\n"
13637 ".section .fixup,\"ax\"\n"
13638diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13639index 6a564ac..9b1340c 100644
13640--- a/arch/x86/kernel/acpi/realmode/Makefile
13641+++ b/arch/x86/kernel/acpi/realmode/Makefile
13642@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13643 $(call cc-option, -fno-stack-protector) \
13644 $(call cc-option, -mpreferred-stack-boundary=2)
13645 KBUILD_CFLAGS += $(call cc-option, -m32)
13646+ifdef CONSTIFY_PLUGIN
13647+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13648+endif
13649 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13650 GCOV_PROFILE := n
13651
13652diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13653index b4fd836..4358fe3 100644
13654--- a/arch/x86/kernel/acpi/realmode/wakeup.S
13655+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13656@@ -108,6 +108,9 @@ wakeup_code:
13657 /* Do any other stuff... */
13658
13659 #ifndef CONFIG_64BIT
13660+ /* Recheck NX bit overrides (64bit path does this in trampoline */
13661+ call verify_cpu
13662+
13663 /* This could also be done in C code... */
13664 movl pmode_cr3, %eax
13665 movl %eax, %cr3
13666@@ -131,6 +134,7 @@ wakeup_code:
13667 movl pmode_cr0, %eax
13668 movl %eax, %cr0
13669 jmp pmode_return
13670+# include "../../verify_cpu.S"
13671 #else
13672 pushw $0
13673 pushw trampoline_segment
13674diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13675index 146a49c..1b5338b 100644
13676--- a/arch/x86/kernel/acpi/sleep.c
13677+++ b/arch/x86/kernel/acpi/sleep.c
13678@@ -98,8 +98,12 @@ int acpi_suspend_lowlevel(void)
13679 header->trampoline_segment = trampoline_address() >> 4;
13680 #ifdef CONFIG_SMP
13681 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13682+
13683+ pax_open_kernel();
13684 early_gdt_descr.address =
13685 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13686+ pax_close_kernel();
13687+
13688 initial_gs = per_cpu_offset(smp_processor_id());
13689 #endif
13690 initial_code = (unsigned long)wakeup_long64;
13691diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13692index 7261083..5c12053 100644
13693--- a/arch/x86/kernel/acpi/wakeup_32.S
13694+++ b/arch/x86/kernel/acpi/wakeup_32.S
13695@@ -30,13 +30,11 @@ wakeup_pmode_return:
13696 # and restore the stack ... but you need gdt for this to work
13697 movl saved_context_esp, %esp
13698
13699- movl %cs:saved_magic, %eax
13700- cmpl $0x12345678, %eax
13701+ cmpl $0x12345678, saved_magic
13702 jne bogus_magic
13703
13704 # jump to place where we left off
13705- movl saved_eip, %eax
13706- jmp *%eax
13707+ jmp *(saved_eip)
13708
13709 bogus_magic:
13710 jmp bogus_magic
13711diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13712index 1f84794..e23f862 100644
13713--- a/arch/x86/kernel/alternative.c
13714+++ b/arch/x86/kernel/alternative.c
13715@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
13716 */
13717 for (a = start; a < end; a++) {
13718 instr = (u8 *)&a->instr_offset + a->instr_offset;
13719+
13720+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13721+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13722+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13723+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13724+#endif
13725+
13726 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13727 BUG_ON(a->replacementlen > a->instrlen);
13728 BUG_ON(a->instrlen > sizeof(insnbuf));
13729@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
13730 for (poff = start; poff < end; poff++) {
13731 u8 *ptr = (u8 *)poff + *poff;
13732
13733+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13734+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13735+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13736+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13737+#endif
13738+
13739 if (!*poff || ptr < text || ptr >= text_end)
13740 continue;
13741 /* turn DS segment override prefix into lock prefix */
13742- if (*ptr == 0x3e)
13743+ if (*ktla_ktva(ptr) == 0x3e)
13744 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13745 };
13746 mutex_unlock(&text_mutex);
13747@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
13748 for (poff = start; poff < end; poff++) {
13749 u8 *ptr = (u8 *)poff + *poff;
13750
13751+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13752+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13753+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13754+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13755+#endif
13756+
13757 if (!*poff || ptr < text || ptr >= text_end)
13758 continue;
13759 /* turn lock prefix into DS segment override prefix */
13760- if (*ptr == 0xf0)
13761+ if (*ktla_ktva(ptr) == 0xf0)
13762 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13763 };
13764 mutex_unlock(&text_mutex);
13765@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13766
13767 BUG_ON(p->len > MAX_PATCH_LEN);
13768 /* prep the buffer with the original instructions */
13769- memcpy(insnbuf, p->instr, p->len);
13770+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13771 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13772 (unsigned long)p->instr, p->len);
13773
13774@@ -568,7 +587,7 @@ void __init alternative_instructions(void)
13775 if (smp_alt_once)
13776 free_init_pages("SMP alternatives",
13777 (unsigned long)__smp_locks,
13778- (unsigned long)__smp_locks_end);
13779+ PAGE_ALIGN((unsigned long)__smp_locks_end));
13780
13781 restart_nmi();
13782 }
13783@@ -585,13 +604,17 @@ void __init alternative_instructions(void)
13784 * instructions. And on the local CPU you need to be protected again NMI or MCE
13785 * handlers seeing an inconsistent instruction while you patch.
13786 */
13787-void *__init_or_module text_poke_early(void *addr, const void *opcode,
13788+void *__kprobes text_poke_early(void *addr, const void *opcode,
13789 size_t len)
13790 {
13791 unsigned long flags;
13792 local_irq_save(flags);
13793- memcpy(addr, opcode, len);
13794+
13795+ pax_open_kernel();
13796+ memcpy(ktla_ktva(addr), opcode, len);
13797 sync_core();
13798+ pax_close_kernel();
13799+
13800 local_irq_restore(flags);
13801 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13802 that causes hangs on some VIA CPUs. */
13803@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
13804 */
13805 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13806 {
13807- unsigned long flags;
13808- char *vaddr;
13809+ unsigned char *vaddr = ktla_ktva(addr);
13810 struct page *pages[2];
13811- int i;
13812+ size_t i;
13813
13814 if (!core_kernel_text((unsigned long)addr)) {
13815- pages[0] = vmalloc_to_page(addr);
13816- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13817+ pages[0] = vmalloc_to_page(vaddr);
13818+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13819 } else {
13820- pages[0] = virt_to_page(addr);
13821+ pages[0] = virt_to_page(vaddr);
13822 WARN_ON(!PageReserved(pages[0]));
13823- pages[1] = virt_to_page(addr + PAGE_SIZE);
13824+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13825 }
13826 BUG_ON(!pages[0]);
13827- local_irq_save(flags);
13828- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13829- if (pages[1])
13830- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13831- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13832- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13833- clear_fixmap(FIX_TEXT_POKE0);
13834- if (pages[1])
13835- clear_fixmap(FIX_TEXT_POKE1);
13836- local_flush_tlb();
13837- sync_core();
13838- /* Could also do a CLFLUSH here to speed up CPU recovery; but
13839- that causes hangs on some VIA CPUs. */
13840+ text_poke_early(addr, opcode, len);
13841 for (i = 0; i < len; i++)
13842- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13843- local_irq_restore(flags);
13844+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13845 return addr;
13846 }
13847
13848diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13849index edc2448..553e7c5 100644
13850--- a/arch/x86/kernel/apic/apic.c
13851+++ b/arch/x86/kernel/apic/apic.c
13852@@ -184,7 +184,7 @@ int first_system_vector = 0xfe;
13853 /*
13854 * Debug level, exported for io_apic.c
13855 */
13856-unsigned int apic_verbosity;
13857+int apic_verbosity;
13858
13859 int pic_mode;
13860
13861@@ -1917,7 +1917,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13862 apic_write(APIC_ESR, 0);
13863 v1 = apic_read(APIC_ESR);
13864 ack_APIC_irq();
13865- atomic_inc(&irq_err_count);
13866+ atomic_inc_unchecked(&irq_err_count);
13867
13868 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
13869 smp_processor_id(), v0 , v1);
13870diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13871index e88300d..cd5a87a 100644
13872--- a/arch/x86/kernel/apic/io_apic.c
13873+++ b/arch/x86/kernel/apic/io_apic.c
13874@@ -83,7 +83,9 @@ static struct io_apic_ops io_apic_ops = {
13875
13876 void __init set_io_apic_ops(const struct io_apic_ops *ops)
13877 {
13878- io_apic_ops = *ops;
13879+ pax_open_kernel();
13880+ memcpy((void*)&io_apic_ops, ops, sizeof io_apic_ops);
13881+ pax_close_kernel();
13882 }
13883
13884 /*
13885@@ -1135,7 +1137,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13886 }
13887 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13888
13889-void lock_vector_lock(void)
13890+void lock_vector_lock(void) __acquires(vector_lock)
13891 {
13892 /* Used to the online set of cpus does not change
13893 * during assign_irq_vector.
13894@@ -1143,7 +1145,7 @@ void lock_vector_lock(void)
13895 raw_spin_lock(&vector_lock);
13896 }
13897
13898-void unlock_vector_lock(void)
13899+void unlock_vector_lock(void) __releases(vector_lock)
13900 {
13901 raw_spin_unlock(&vector_lock);
13902 }
13903@@ -2549,7 +2551,7 @@ static void ack_apic_edge(struct irq_data *data)
13904 ack_APIC_irq();
13905 }
13906
13907-atomic_t irq_mis_count;
13908+atomic_unchecked_t irq_mis_count;
13909
13910 #ifdef CONFIG_GENERIC_PENDING_IRQ
13911 static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
13912@@ -2667,7 +2669,7 @@ static void ack_apic_level(struct irq_data *data)
13913 * at the cpu.
13914 */
13915 if (!(v & (1 << (i & 0x1f)))) {
13916- atomic_inc(&irq_mis_count);
13917+ atomic_inc_unchecked(&irq_mis_count);
13918
13919 eoi_ioapic_irq(irq, cfg);
13920 }
13921diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13922index 459e78c..f037006 100644
13923--- a/arch/x86/kernel/apm_32.c
13924+++ b/arch/x86/kernel/apm_32.c
13925@@ -410,7 +410,7 @@ static DEFINE_MUTEX(apm_mutex);
13926 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13927 * even though they are called in protected mode.
13928 */
13929-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13930+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13931 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13932
13933 static const char driver_version[] = "1.16ac"; /* no spaces */
13934@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13935 BUG_ON(cpu != 0);
13936 gdt = get_cpu_gdt_table(cpu);
13937 save_desc_40 = gdt[0x40 / 8];
13938+
13939+ pax_open_kernel();
13940 gdt[0x40 / 8] = bad_bios_desc;
13941+ pax_close_kernel();
13942
13943 apm_irq_save(flags);
13944 APM_DO_SAVE_SEGS;
13945@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13946 &call->esi);
13947 APM_DO_RESTORE_SEGS;
13948 apm_irq_restore(flags);
13949+
13950+ pax_open_kernel();
13951 gdt[0x40 / 8] = save_desc_40;
13952+ pax_close_kernel();
13953+
13954 put_cpu();
13955
13956 return call->eax & 0xff;
13957@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
13958 BUG_ON(cpu != 0);
13959 gdt = get_cpu_gdt_table(cpu);
13960 save_desc_40 = gdt[0x40 / 8];
13961+
13962+ pax_open_kernel();
13963 gdt[0x40 / 8] = bad_bios_desc;
13964+ pax_close_kernel();
13965
13966 apm_irq_save(flags);
13967 APM_DO_SAVE_SEGS;
13968@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
13969 &call->eax);
13970 APM_DO_RESTORE_SEGS;
13971 apm_irq_restore(flags);
13972+
13973+ pax_open_kernel();
13974 gdt[0x40 / 8] = save_desc_40;
13975+ pax_close_kernel();
13976+
13977 put_cpu();
13978 return error;
13979 }
13980@@ -2345,12 +2359,15 @@ static int __init apm_init(void)
13981 * code to that CPU.
13982 */
13983 gdt = get_cpu_gdt_table(0);
13984+
13985+ pax_open_kernel();
13986 set_desc_base(&gdt[APM_CS >> 3],
13987 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13988 set_desc_base(&gdt[APM_CS_16 >> 3],
13989 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13990 set_desc_base(&gdt[APM_DS >> 3],
13991 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13992+ pax_close_kernel();
13993
13994 proc_create("apm", 0, NULL, &apm_file_ops);
13995
13996diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
13997index 68de2dc..1f3c720 100644
13998--- a/arch/x86/kernel/asm-offsets.c
13999+++ b/arch/x86/kernel/asm-offsets.c
14000@@ -33,6 +33,8 @@ void common(void) {
14001 OFFSET(TI_status, thread_info, status);
14002 OFFSET(TI_addr_limit, thread_info, addr_limit);
14003 OFFSET(TI_preempt_count, thread_info, preempt_count);
14004+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
14005+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14006
14007 BLANK();
14008 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
14009@@ -53,8 +55,26 @@ void common(void) {
14010 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14011 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14012 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
14013+
14014+#ifdef CONFIG_PAX_KERNEXEC
14015+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14016 #endif
14017
14018+#ifdef CONFIG_PAX_MEMORY_UDEREF
14019+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
14020+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
14021+#ifdef CONFIG_X86_64
14022+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
14023+#endif
14024+#endif
14025+
14026+#endif
14027+
14028+ BLANK();
14029+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14030+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
14031+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14032+
14033 #ifdef CONFIG_XEN
14034 BLANK();
14035 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
14036diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
14037index 1b4754f..fbb4227 100644
14038--- a/arch/x86/kernel/asm-offsets_64.c
14039+++ b/arch/x86/kernel/asm-offsets_64.c
14040@@ -76,6 +76,7 @@ int main(void)
14041 BLANK();
14042 #undef ENTRY
14043
14044+ DEFINE(TSS_size, sizeof(struct tss_struct));
14045 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
14046 BLANK();
14047
14048diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
14049index 6ab6aa2..8f71507 100644
14050--- a/arch/x86/kernel/cpu/Makefile
14051+++ b/arch/x86/kernel/cpu/Makefile
14052@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
14053 CFLAGS_REMOVE_perf_event.o = -pg
14054 endif
14055
14056-# Make sure load_percpu_segment has no stackprotector
14057-nostackp := $(call cc-option, -fno-stack-protector)
14058-CFLAGS_common.o := $(nostackp)
14059-
14060 obj-y := intel_cacheinfo.o scattered.o topology.o
14061 obj-y += proc.o capflags.o powerflags.o common.o
14062 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
14063diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
14064index 146bb62..ac9c74a 100644
14065--- a/arch/x86/kernel/cpu/amd.c
14066+++ b/arch/x86/kernel/cpu/amd.c
14067@@ -691,7 +691,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
14068 unsigned int size)
14069 {
14070 /* AMD errata T13 (order #21922) */
14071- if ((c->x86 == 6)) {
14072+ if (c->x86 == 6) {
14073 /* Duron Rev A0 */
14074 if (c->x86_model == 3 && c->x86_mask == 0)
14075 size = 64;
14076diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
14077index cf79302..b1b28ae 100644
14078--- a/arch/x86/kernel/cpu/common.c
14079+++ b/arch/x86/kernel/cpu/common.c
14080@@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
14081
14082 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
14083
14084-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
14085-#ifdef CONFIG_X86_64
14086- /*
14087- * We need valid kernel segments for data and code in long mode too
14088- * IRET will check the segment types kkeil 2000/10/28
14089- * Also sysret mandates a special GDT layout
14090- *
14091- * TLS descriptors are currently at a different place compared to i386.
14092- * Hopefully nobody expects them at a fixed place (Wine?)
14093- */
14094- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
14095- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
14096- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
14097- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
14098- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
14099- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
14100-#else
14101- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
14102- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14103- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
14104- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
14105- /*
14106- * Segments used for calling PnP BIOS have byte granularity.
14107- * They code segments and data segments have fixed 64k limits,
14108- * the transfer segment sizes are set at run time.
14109- */
14110- /* 32-bit code */
14111- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14112- /* 16-bit code */
14113- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14114- /* 16-bit data */
14115- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
14116- /* 16-bit data */
14117- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
14118- /* 16-bit data */
14119- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
14120- /*
14121- * The APM segments have byte granularity and their bases
14122- * are set at run time. All have 64k limits.
14123- */
14124- /* 32-bit code */
14125- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14126- /* 16-bit code */
14127- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14128- /* data */
14129- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
14130-
14131- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14132- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14133- GDT_STACK_CANARY_INIT
14134-#endif
14135-} };
14136-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
14137-
14138 static int __init x86_xsave_setup(char *s)
14139 {
14140 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
14141@@ -374,7 +320,7 @@ void switch_to_new_gdt(int cpu)
14142 {
14143 struct desc_ptr gdt_descr;
14144
14145- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
14146+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14147 gdt_descr.size = GDT_SIZE - 1;
14148 load_gdt(&gdt_descr);
14149 /* Reload the per-cpu base */
14150@@ -841,6 +787,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
14151 /* Filter out anything that depends on CPUID levels we don't have */
14152 filter_cpuid_features(c, true);
14153
14154+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14155+ setup_clear_cpu_cap(X86_FEATURE_SEP);
14156+#endif
14157+
14158 /* If the model name is still unset, do table lookup. */
14159 if (!c->x86_model_id[0]) {
14160 const char *p;
14161@@ -1021,10 +971,12 @@ static __init int setup_disablecpuid(char *arg)
14162 }
14163 __setup("clearcpuid=", setup_disablecpuid);
14164
14165+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
14166+EXPORT_PER_CPU_SYMBOL(current_tinfo);
14167+
14168 #ifdef CONFIG_X86_64
14169 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
14170-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
14171- (unsigned long) nmi_idt_table };
14172+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
14173
14174 DEFINE_PER_CPU_FIRST(union irq_stack_union,
14175 irq_stack_union) __aligned(PAGE_SIZE);
14176@@ -1038,7 +990,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
14177 EXPORT_PER_CPU_SYMBOL(current_task);
14178
14179 DEFINE_PER_CPU(unsigned long, kernel_stack) =
14180- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
14181+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
14182 EXPORT_PER_CPU_SYMBOL(kernel_stack);
14183
14184 DEFINE_PER_CPU(char *, irq_stack_ptr) =
14185@@ -1126,7 +1078,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
14186 {
14187 memset(regs, 0, sizeof(struct pt_regs));
14188 regs->fs = __KERNEL_PERCPU;
14189- regs->gs = __KERNEL_STACK_CANARY;
14190+ savesegment(gs, regs->gs);
14191
14192 return regs;
14193 }
14194@@ -1181,7 +1133,7 @@ void __cpuinit cpu_init(void)
14195 int i;
14196
14197 cpu = stack_smp_processor_id();
14198- t = &per_cpu(init_tss, cpu);
14199+ t = init_tss + cpu;
14200 oist = &per_cpu(orig_ist, cpu);
14201
14202 #ifdef CONFIG_NUMA
14203@@ -1207,7 +1159,7 @@ void __cpuinit cpu_init(void)
14204 switch_to_new_gdt(cpu);
14205 loadsegment(fs, 0);
14206
14207- load_idt((const struct desc_ptr *)&idt_descr);
14208+ load_idt(&idt_descr);
14209
14210 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
14211 syscall_init();
14212@@ -1216,7 +1168,6 @@ void __cpuinit cpu_init(void)
14213 wrmsrl(MSR_KERNEL_GS_BASE, 0);
14214 barrier();
14215
14216- x86_configure_nx();
14217 if (cpu != 0)
14218 enable_x2apic();
14219
14220@@ -1272,7 +1223,7 @@ void __cpuinit cpu_init(void)
14221 {
14222 int cpu = smp_processor_id();
14223 struct task_struct *curr = current;
14224- struct tss_struct *t = &per_cpu(init_tss, cpu);
14225+ struct tss_struct *t = init_tss + cpu;
14226 struct thread_struct *thread = &curr->thread;
14227
14228 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
14229diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
14230index 3e6ff6c..54b4992 100644
14231--- a/arch/x86/kernel/cpu/intel.c
14232+++ b/arch/x86/kernel/cpu/intel.c
14233@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
14234 * Update the IDT descriptor and reload the IDT so that
14235 * it uses the read-only mapped virtual address.
14236 */
14237- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14238+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14239 load_idt(&idt_descr);
14240 }
14241 #endif
14242diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
14243index 61604ae..98250a5 100644
14244--- a/arch/x86/kernel/cpu/mcheck/mce.c
14245+++ b/arch/x86/kernel/cpu/mcheck/mce.c
14246@@ -42,6 +42,7 @@
14247 #include <asm/processor.h>
14248 #include <asm/mce.h>
14249 #include <asm/msr.h>
14250+#include <asm/local.h>
14251
14252 #include "mce-internal.h"
14253
14254@@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
14255 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14256 m->cs, m->ip);
14257
14258- if (m->cs == __KERNEL_CS)
14259+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14260 print_symbol("{%s}", m->ip);
14261 pr_cont("\n");
14262 }
14263@@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
14264
14265 #define PANIC_TIMEOUT 5 /* 5 seconds */
14266
14267-static atomic_t mce_paniced;
14268+static atomic_unchecked_t mce_paniced;
14269
14270 static int fake_panic;
14271-static atomic_t mce_fake_paniced;
14272+static atomic_unchecked_t mce_fake_paniced;
14273
14274 /* Panic in progress. Enable interrupts and wait for final IPI */
14275 static void wait_for_panic(void)
14276@@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14277 /*
14278 * Make sure only one CPU runs in machine check panic
14279 */
14280- if (atomic_inc_return(&mce_paniced) > 1)
14281+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14282 wait_for_panic();
14283 barrier();
14284
14285@@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14286 console_verbose();
14287 } else {
14288 /* Don't log too much for fake panic */
14289- if (atomic_inc_return(&mce_fake_paniced) > 1)
14290+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14291 return;
14292 }
14293 /* First print corrected ones that are still unlogged */
14294@@ -684,7 +685,7 @@ static int mce_timed_out(u64 *t)
14295 * might have been modified by someone else.
14296 */
14297 rmb();
14298- if (atomic_read(&mce_paniced))
14299+ if (atomic_read_unchecked(&mce_paniced))
14300 wait_for_panic();
14301 if (!monarch_timeout)
14302 goto out;
14303@@ -1535,7 +1536,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14304 }
14305
14306 /* Call the installed machine check handler for this CPU setup. */
14307-void (*machine_check_vector)(struct pt_regs *, long error_code) =
14308+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14309 unexpected_machine_check;
14310
14311 /*
14312@@ -1558,7 +1559,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14313 return;
14314 }
14315
14316+ pax_open_kernel();
14317 machine_check_vector = do_machine_check;
14318+ pax_close_kernel();
14319
14320 __mcheck_cpu_init_generic();
14321 __mcheck_cpu_init_vendor(c);
14322@@ -1572,7 +1575,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14323 */
14324
14325 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
14326-static int mce_chrdev_open_count; /* #times opened */
14327+static local_t mce_chrdev_open_count; /* #times opened */
14328 static int mce_chrdev_open_exclu; /* already open exclusive? */
14329
14330 static int mce_chrdev_open(struct inode *inode, struct file *file)
14331@@ -1580,7 +1583,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14332 spin_lock(&mce_chrdev_state_lock);
14333
14334 if (mce_chrdev_open_exclu ||
14335- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
14336+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
14337 spin_unlock(&mce_chrdev_state_lock);
14338
14339 return -EBUSY;
14340@@ -1588,7 +1591,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14341
14342 if (file->f_flags & O_EXCL)
14343 mce_chrdev_open_exclu = 1;
14344- mce_chrdev_open_count++;
14345+ local_inc(&mce_chrdev_open_count);
14346
14347 spin_unlock(&mce_chrdev_state_lock);
14348
14349@@ -1599,7 +1602,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
14350 {
14351 spin_lock(&mce_chrdev_state_lock);
14352
14353- mce_chrdev_open_count--;
14354+ local_dec(&mce_chrdev_open_count);
14355 mce_chrdev_open_exclu = 0;
14356
14357 spin_unlock(&mce_chrdev_state_lock);
14358@@ -2324,7 +2327,7 @@ struct dentry *mce_get_debugfs_dir(void)
14359 static void mce_reset(void)
14360 {
14361 cpu_missing = 0;
14362- atomic_set(&mce_fake_paniced, 0);
14363+ atomic_set_unchecked(&mce_fake_paniced, 0);
14364 atomic_set(&mce_executing, 0);
14365 atomic_set(&mce_callin, 0);
14366 atomic_set(&global_nwo, 0);
14367diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
14368index 2d5454c..51987eb 100644
14369--- a/arch/x86/kernel/cpu/mcheck/p5.c
14370+++ b/arch/x86/kernel/cpu/mcheck/p5.c
14371@@ -11,6 +11,7 @@
14372 #include <asm/processor.h>
14373 #include <asm/mce.h>
14374 #include <asm/msr.h>
14375+#include <asm/pgtable.h>
14376
14377 /* By default disabled */
14378 int mce_p5_enabled __read_mostly;
14379@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14380 if (!cpu_has(c, X86_FEATURE_MCE))
14381 return;
14382
14383+ pax_open_kernel();
14384 machine_check_vector = pentium_machine_check;
14385+ pax_close_kernel();
14386 /* Make sure the vector pointer is visible before we enable MCEs: */
14387 wmb();
14388
14389diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
14390index 2d7998f..17c9de1 100644
14391--- a/arch/x86/kernel/cpu/mcheck/winchip.c
14392+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14393@@ -10,6 +10,7 @@
14394 #include <asm/processor.h>
14395 #include <asm/mce.h>
14396 #include <asm/msr.h>
14397+#include <asm/pgtable.h>
14398
14399 /* Machine check handler for WinChip C6: */
14400 static void winchip_machine_check(struct pt_regs *regs, long error_code)
14401@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14402 {
14403 u32 lo, hi;
14404
14405+ pax_open_kernel();
14406 machine_check_vector = winchip_machine_check;
14407+ pax_close_kernel();
14408 /* Make sure the vector pointer is visible before we enable MCEs: */
14409 wmb();
14410
14411diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14412index 6b96110..0da73eb 100644
14413--- a/arch/x86/kernel/cpu/mtrr/main.c
14414+++ b/arch/x86/kernel/cpu/mtrr/main.c
14415@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
14416 u64 size_or_mask, size_and_mask;
14417 static bool mtrr_aps_delayed_init;
14418
14419-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14420+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14421
14422 const struct mtrr_ops *mtrr_if;
14423
14424diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14425index df5e41f..816c719 100644
14426--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14427+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14428@@ -25,7 +25,7 @@ struct mtrr_ops {
14429 int (*validate_add_page)(unsigned long base, unsigned long size,
14430 unsigned int type);
14431 int (*have_wrcomb)(void);
14432-};
14433+} __do_const;
14434
14435 extern int generic_get_free_region(unsigned long base, unsigned long size,
14436 int replace_reg);
14437diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14438index bb8e034..fb9020b 100644
14439--- a/arch/x86/kernel/cpu/perf_event.c
14440+++ b/arch/x86/kernel/cpu/perf_event.c
14441@@ -1835,7 +1835,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
14442 break;
14443
14444 perf_callchain_store(entry, frame.return_address);
14445- fp = frame.next_frame;
14446+ fp = (const void __force_user *)frame.next_frame;
14447 }
14448 }
14449
14450diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14451index 13ad899..f642b9a 100644
14452--- a/arch/x86/kernel/crash.c
14453+++ b/arch/x86/kernel/crash.c
14454@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
14455 {
14456 #ifdef CONFIG_X86_32
14457 struct pt_regs fixed_regs;
14458-#endif
14459
14460-#ifdef CONFIG_X86_32
14461- if (!user_mode_vm(regs)) {
14462+ if (!user_mode(regs)) {
14463 crash_fixup_ss_esp(&fixed_regs, regs);
14464 regs = &fixed_regs;
14465 }
14466diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14467index 37250fe..bf2ec74 100644
14468--- a/arch/x86/kernel/doublefault_32.c
14469+++ b/arch/x86/kernel/doublefault_32.c
14470@@ -11,7 +11,7 @@
14471
14472 #define DOUBLEFAULT_STACKSIZE (1024)
14473 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14474-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14475+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14476
14477 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14478
14479@@ -21,7 +21,7 @@ static void doublefault_fn(void)
14480 unsigned long gdt, tss;
14481
14482 store_gdt(&gdt_desc);
14483- gdt = gdt_desc.address;
14484+ gdt = (unsigned long)gdt_desc.address;
14485
14486 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14487
14488@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14489 /* 0x2 bit is always set */
14490 .flags = X86_EFLAGS_SF | 0x2,
14491 .sp = STACK_START,
14492- .es = __USER_DS,
14493+ .es = __KERNEL_DS,
14494 .cs = __KERNEL_CS,
14495 .ss = __KERNEL_DS,
14496- .ds = __USER_DS,
14497+ .ds = __KERNEL_DS,
14498 .fs = __KERNEL_PERCPU,
14499
14500 .__cr3 = __pa_nodebug(swapper_pg_dir),
14501diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14502index 1b81839..0b4e7b0 100644
14503--- a/arch/x86/kernel/dumpstack.c
14504+++ b/arch/x86/kernel/dumpstack.c
14505@@ -2,6 +2,9 @@
14506 * Copyright (C) 1991, 1992 Linus Torvalds
14507 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14508 */
14509+#ifdef CONFIG_GRKERNSEC_HIDESYM
14510+#define __INCLUDED_BY_HIDESYM 1
14511+#endif
14512 #include <linux/kallsyms.h>
14513 #include <linux/kprobes.h>
14514 #include <linux/uaccess.h>
14515@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
14516 static void
14517 print_ftrace_graph_addr(unsigned long addr, void *data,
14518 const struct stacktrace_ops *ops,
14519- struct thread_info *tinfo, int *graph)
14520+ struct task_struct *task, int *graph)
14521 {
14522- struct task_struct *task;
14523 unsigned long ret_addr;
14524 int index;
14525
14526 if (addr != (unsigned long)return_to_handler)
14527 return;
14528
14529- task = tinfo->task;
14530 index = task->curr_ret_stack;
14531
14532 if (!task->ret_stack || index < *graph)
14533@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14534 static inline void
14535 print_ftrace_graph_addr(unsigned long addr, void *data,
14536 const struct stacktrace_ops *ops,
14537- struct thread_info *tinfo, int *graph)
14538+ struct task_struct *task, int *graph)
14539 { }
14540 #endif
14541
14542@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14543 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14544 */
14545
14546-static inline int valid_stack_ptr(struct thread_info *tinfo,
14547- void *p, unsigned int size, void *end)
14548+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14549 {
14550- void *t = tinfo;
14551 if (end) {
14552 if (p < end && p >= (end-THREAD_SIZE))
14553 return 1;
14554@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14555 }
14556
14557 unsigned long
14558-print_context_stack(struct thread_info *tinfo,
14559+print_context_stack(struct task_struct *task, void *stack_start,
14560 unsigned long *stack, unsigned long bp,
14561 const struct stacktrace_ops *ops, void *data,
14562 unsigned long *end, int *graph)
14563 {
14564 struct stack_frame *frame = (struct stack_frame *)bp;
14565
14566- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14567+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14568 unsigned long addr;
14569
14570 addr = *stack;
14571@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
14572 } else {
14573 ops->address(data, addr, 0);
14574 }
14575- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14576+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14577 }
14578 stack++;
14579 }
14580@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
14581 EXPORT_SYMBOL_GPL(print_context_stack);
14582
14583 unsigned long
14584-print_context_stack_bp(struct thread_info *tinfo,
14585+print_context_stack_bp(struct task_struct *task, void *stack_start,
14586 unsigned long *stack, unsigned long bp,
14587 const struct stacktrace_ops *ops, void *data,
14588 unsigned long *end, int *graph)
14589@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14590 struct stack_frame *frame = (struct stack_frame *)bp;
14591 unsigned long *ret_addr = &frame->return_address;
14592
14593- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
14594+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
14595 unsigned long addr = *ret_addr;
14596
14597 if (!__kernel_text_address(addr))
14598@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14599 ops->address(data, addr, 1);
14600 frame = frame->next_frame;
14601 ret_addr = &frame->return_address;
14602- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14603+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14604 }
14605
14606 return (unsigned long)frame;
14607@@ -189,7 +188,7 @@ void dump_stack(void)
14608
14609 bp = stack_frame(current, NULL);
14610 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14611- current->pid, current->comm, print_tainted(),
14612+ task_pid_nr(current), current->comm, print_tainted(),
14613 init_utsname()->release,
14614 (int)strcspn(init_utsname()->version, " "),
14615 init_utsname()->version);
14616@@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
14617 }
14618 EXPORT_SYMBOL_GPL(oops_begin);
14619
14620+extern void gr_handle_kernel_exploit(void);
14621+
14622 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14623 {
14624 if (regs && kexec_should_crash(current))
14625@@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14626 panic("Fatal exception in interrupt");
14627 if (panic_on_oops)
14628 panic("Fatal exception");
14629- do_exit(signr);
14630+
14631+ gr_handle_kernel_exploit();
14632+
14633+ do_group_exit(signr);
14634 }
14635
14636 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14637@@ -273,7 +277,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14638
14639 show_registers(regs);
14640 #ifdef CONFIG_X86_32
14641- if (user_mode_vm(regs)) {
14642+ if (user_mode(regs)) {
14643 sp = regs->sp;
14644 ss = regs->ss & 0xffff;
14645 } else {
14646@@ -301,7 +305,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14647 unsigned long flags = oops_begin();
14648 int sig = SIGSEGV;
14649
14650- if (!user_mode_vm(regs))
14651+ if (!user_mode(regs))
14652 report_bug(regs->ip, regs);
14653
14654 if (__die(str, regs, err))
14655diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14656index 88ec912..e95e935 100644
14657--- a/arch/x86/kernel/dumpstack_32.c
14658+++ b/arch/x86/kernel/dumpstack_32.c
14659@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14660 bp = stack_frame(task, regs);
14661
14662 for (;;) {
14663- struct thread_info *context;
14664+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14665
14666- context = (struct thread_info *)
14667- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14668- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14669+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14670
14671- stack = (unsigned long *)context->previous_esp;
14672- if (!stack)
14673+ if (stack_start == task_stack_page(task))
14674 break;
14675+ stack = *(unsigned long **)stack_start;
14676 if (ops->stack(data, "IRQ") < 0)
14677 break;
14678 touch_nmi_watchdog();
14679@@ -87,7 +85,7 @@ void show_registers(struct pt_regs *regs)
14680 int i;
14681
14682 print_modules();
14683- __show_regs(regs, !user_mode_vm(regs));
14684+ __show_regs(regs, !user_mode(regs));
14685
14686 printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
14687 TASK_COMM_LEN, current->comm, task_pid_nr(current),
14688@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
14689 * When in-kernel, we also print out the stack and code at the
14690 * time of the fault..
14691 */
14692- if (!user_mode_vm(regs)) {
14693+ if (!user_mode(regs)) {
14694 unsigned int code_prologue = code_bytes * 43 / 64;
14695 unsigned int code_len = code_bytes;
14696 unsigned char c;
14697 u8 *ip;
14698+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14699
14700 printk(KERN_EMERG "Stack:\n");
14701 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14702
14703 printk(KERN_EMERG "Code: ");
14704
14705- ip = (u8 *)regs->ip - code_prologue;
14706+ ip = (u8 *)regs->ip - code_prologue + cs_base;
14707 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14708 /* try starting at IP */
14709- ip = (u8 *)regs->ip;
14710+ ip = (u8 *)regs->ip + cs_base;
14711 code_len = code_len - code_prologue + 1;
14712 }
14713 for (i = 0; i < code_len; i++, ip++) {
14714@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
14715 printk(KERN_CONT " Bad EIP value.");
14716 break;
14717 }
14718- if (ip == (u8 *)regs->ip)
14719+ if (ip == (u8 *)regs->ip + cs_base)
14720 printk(KERN_CONT "<%02x> ", c);
14721 else
14722 printk(KERN_CONT "%02x ", c);
14723@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14724 {
14725 unsigned short ud2;
14726
14727+ ip = ktla_ktva(ip);
14728 if (ip < PAGE_OFFSET)
14729 return 0;
14730 if (probe_kernel_address((unsigned short *)ip, ud2))
14731@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14732
14733 return ud2 == 0x0b0f;
14734 }
14735+
14736+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14737+void pax_check_alloca(unsigned long size)
14738+{
14739+ unsigned long sp = (unsigned long)&sp, stack_left;
14740+
14741+ /* all kernel stacks are of the same size */
14742+ stack_left = sp & (THREAD_SIZE - 1);
14743+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14744+}
14745+EXPORT_SYMBOL(pax_check_alloca);
14746+#endif
14747diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14748index 17107bd..9623722 100644
14749--- a/arch/x86/kernel/dumpstack_64.c
14750+++ b/arch/x86/kernel/dumpstack_64.c
14751@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14752 unsigned long *irq_stack_end =
14753 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14754 unsigned used = 0;
14755- struct thread_info *tinfo;
14756 int graph = 0;
14757 unsigned long dummy;
14758+ void *stack_start;
14759
14760 if (!task)
14761 task = current;
14762@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14763 * current stack address. If the stacks consist of nested
14764 * exceptions
14765 */
14766- tinfo = task_thread_info(task);
14767 for (;;) {
14768 char *id;
14769 unsigned long *estack_end;
14770+
14771 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14772 &used, &id);
14773
14774@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14775 if (ops->stack(data, id) < 0)
14776 break;
14777
14778- bp = ops->walk_stack(tinfo, stack, bp, ops,
14779+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14780 data, estack_end, &graph);
14781 ops->stack(data, "<EOE>");
14782 /*
14783@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14784 * second-to-last pointer (index -2 to end) in the
14785 * exception stack:
14786 */
14787+ if ((u16)estack_end[-1] != __KERNEL_DS)
14788+ goto out;
14789 stack = (unsigned long *) estack_end[-2];
14790 continue;
14791 }
14792@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14793 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14794 if (ops->stack(data, "IRQ") < 0)
14795 break;
14796- bp = ops->walk_stack(tinfo, stack, bp,
14797+ bp = ops->walk_stack(task, irq_stack, stack, bp,
14798 ops, data, irq_stack_end, &graph);
14799 /*
14800 * We link to the next stack (which would be
14801@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14802 /*
14803 * This handles the process stack:
14804 */
14805- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14806+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14807+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14808+out:
14809 put_cpu();
14810 }
14811 EXPORT_SYMBOL(dump_trace);
14812@@ -305,3 +309,50 @@ int is_valid_bugaddr(unsigned long ip)
14813
14814 return ud2 == 0x0b0f;
14815 }
14816+
14817+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14818+void pax_check_alloca(unsigned long size)
14819+{
14820+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14821+ unsigned cpu, used;
14822+ char *id;
14823+
14824+ /* check the process stack first */
14825+ stack_start = (unsigned long)task_stack_page(current);
14826+ stack_end = stack_start + THREAD_SIZE;
14827+ if (likely(stack_start <= sp && sp < stack_end)) {
14828+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
14829+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14830+ return;
14831+ }
14832+
14833+ cpu = get_cpu();
14834+
14835+ /* check the irq stacks */
14836+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14837+ stack_start = stack_end - IRQ_STACK_SIZE;
14838+ if (stack_start <= sp && sp < stack_end) {
14839+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14840+ put_cpu();
14841+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14842+ return;
14843+ }
14844+
14845+ /* check the exception stacks */
14846+ used = 0;
14847+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14848+ stack_start = stack_end - EXCEPTION_STKSZ;
14849+ if (stack_end && stack_start <= sp && sp < stack_end) {
14850+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14851+ put_cpu();
14852+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14853+ return;
14854+ }
14855+
14856+ put_cpu();
14857+
14858+ /* unknown stack */
14859+ BUG();
14860+}
14861+EXPORT_SYMBOL(pax_check_alloca);
14862+#endif
14863diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14864index 9b9f18b..9fcaa04 100644
14865--- a/arch/x86/kernel/early_printk.c
14866+++ b/arch/x86/kernel/early_printk.c
14867@@ -7,6 +7,7 @@
14868 #include <linux/pci_regs.h>
14869 #include <linux/pci_ids.h>
14870 #include <linux/errno.h>
14871+#include <linux/sched.h>
14872 #include <asm/io.h>
14873 #include <asm/processor.h>
14874 #include <asm/fcntl.h>
14875diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14876index 7b784f4..db6b628 100644
14877--- a/arch/x86/kernel/entry_32.S
14878+++ b/arch/x86/kernel/entry_32.S
14879@@ -179,13 +179,146 @@
14880 /*CFI_REL_OFFSET gs, PT_GS*/
14881 .endm
14882 .macro SET_KERNEL_GS reg
14883+
14884+#ifdef CONFIG_CC_STACKPROTECTOR
14885 movl $(__KERNEL_STACK_CANARY), \reg
14886+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14887+ movl $(__USER_DS), \reg
14888+#else
14889+ xorl \reg, \reg
14890+#endif
14891+
14892 movl \reg, %gs
14893 .endm
14894
14895 #endif /* CONFIG_X86_32_LAZY_GS */
14896
14897-.macro SAVE_ALL
14898+.macro pax_enter_kernel
14899+#ifdef CONFIG_PAX_KERNEXEC
14900+ call pax_enter_kernel
14901+#endif
14902+.endm
14903+
14904+.macro pax_exit_kernel
14905+#ifdef CONFIG_PAX_KERNEXEC
14906+ call pax_exit_kernel
14907+#endif
14908+.endm
14909+
14910+#ifdef CONFIG_PAX_KERNEXEC
14911+ENTRY(pax_enter_kernel)
14912+#ifdef CONFIG_PARAVIRT
14913+ pushl %eax
14914+ pushl %ecx
14915+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14916+ mov %eax, %esi
14917+#else
14918+ mov %cr0, %esi
14919+#endif
14920+ bts $16, %esi
14921+ jnc 1f
14922+ mov %cs, %esi
14923+ cmp $__KERNEL_CS, %esi
14924+ jz 3f
14925+ ljmp $__KERNEL_CS, $3f
14926+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14927+2:
14928+#ifdef CONFIG_PARAVIRT
14929+ mov %esi, %eax
14930+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14931+#else
14932+ mov %esi, %cr0
14933+#endif
14934+3:
14935+#ifdef CONFIG_PARAVIRT
14936+ popl %ecx
14937+ popl %eax
14938+#endif
14939+ ret
14940+ENDPROC(pax_enter_kernel)
14941+
14942+ENTRY(pax_exit_kernel)
14943+#ifdef CONFIG_PARAVIRT
14944+ pushl %eax
14945+ pushl %ecx
14946+#endif
14947+ mov %cs, %esi
14948+ cmp $__KERNEXEC_KERNEL_CS, %esi
14949+ jnz 2f
14950+#ifdef CONFIG_PARAVIRT
14951+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
14952+ mov %eax, %esi
14953+#else
14954+ mov %cr0, %esi
14955+#endif
14956+ btr $16, %esi
14957+ ljmp $__KERNEL_CS, $1f
14958+1:
14959+#ifdef CONFIG_PARAVIRT
14960+ mov %esi, %eax
14961+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
14962+#else
14963+ mov %esi, %cr0
14964+#endif
14965+2:
14966+#ifdef CONFIG_PARAVIRT
14967+ popl %ecx
14968+ popl %eax
14969+#endif
14970+ ret
14971+ENDPROC(pax_exit_kernel)
14972+#endif
14973+
14974+.macro pax_erase_kstack
14975+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14976+ call pax_erase_kstack
14977+#endif
14978+.endm
14979+
14980+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14981+/*
14982+ * ebp: thread_info
14983+ * ecx, edx: can be clobbered
14984+ */
14985+ENTRY(pax_erase_kstack)
14986+ pushl %edi
14987+ pushl %eax
14988+
14989+ mov TI_lowest_stack(%ebp), %edi
14990+ mov $-0xBEEF, %eax
14991+ std
14992+
14993+1: mov %edi, %ecx
14994+ and $THREAD_SIZE_asm - 1, %ecx
14995+ shr $2, %ecx
14996+ repne scasl
14997+ jecxz 2f
14998+
14999+ cmp $2*16, %ecx
15000+ jc 2f
15001+
15002+ mov $2*16, %ecx
15003+ repe scasl
15004+ jecxz 2f
15005+ jne 1b
15006+
15007+2: cld
15008+ mov %esp, %ecx
15009+ sub %edi, %ecx
15010+ shr $2, %ecx
15011+ rep stosl
15012+
15013+ mov TI_task_thread_sp0(%ebp), %edi
15014+ sub $128, %edi
15015+ mov %edi, TI_lowest_stack(%ebp)
15016+
15017+ popl %eax
15018+ popl %edi
15019+ ret
15020+ENDPROC(pax_erase_kstack)
15021+#endif
15022+
15023+.macro __SAVE_ALL _DS
15024 cld
15025 PUSH_GS
15026 pushl_cfi %fs
15027@@ -208,7 +341,7 @@
15028 CFI_REL_OFFSET ecx, 0
15029 pushl_cfi %ebx
15030 CFI_REL_OFFSET ebx, 0
15031- movl $(__USER_DS), %edx
15032+ movl $\_DS, %edx
15033 movl %edx, %ds
15034 movl %edx, %es
15035 movl $(__KERNEL_PERCPU), %edx
15036@@ -216,6 +349,15 @@
15037 SET_KERNEL_GS %edx
15038 .endm
15039
15040+.macro SAVE_ALL
15041+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15042+ __SAVE_ALL __KERNEL_DS
15043+ pax_enter_kernel
15044+#else
15045+ __SAVE_ALL __USER_DS
15046+#endif
15047+.endm
15048+
15049 .macro RESTORE_INT_REGS
15050 popl_cfi %ebx
15051 CFI_RESTORE ebx
15052@@ -301,7 +443,7 @@ ENTRY(ret_from_fork)
15053 popfl_cfi
15054 jmp syscall_exit
15055 CFI_ENDPROC
15056-END(ret_from_fork)
15057+ENDPROC(ret_from_fork)
15058
15059 /*
15060 * Interrupt exit functions should be protected against kprobes
15061@@ -335,7 +477,15 @@ resume_userspace_sig:
15062 andl $SEGMENT_RPL_MASK, %eax
15063 #endif
15064 cmpl $USER_RPL, %eax
15065+
15066+#ifdef CONFIG_PAX_KERNEXEC
15067+ jae resume_userspace
15068+
15069+ pax_exit_kernel
15070+ jmp resume_kernel
15071+#else
15072 jb resume_kernel # not returning to v8086 or userspace
15073+#endif
15074
15075 ENTRY(resume_userspace)
15076 LOCKDEP_SYS_EXIT
15077@@ -347,8 +497,8 @@ ENTRY(resume_userspace)
15078 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15079 # int/exception return?
15080 jne work_pending
15081- jmp restore_all
15082-END(ret_from_exception)
15083+ jmp restore_all_pax
15084+ENDPROC(ret_from_exception)
15085
15086 #ifdef CONFIG_PREEMPT
15087 ENTRY(resume_kernel)
15088@@ -363,7 +513,7 @@ need_resched:
15089 jz restore_all
15090 call preempt_schedule_irq
15091 jmp need_resched
15092-END(resume_kernel)
15093+ENDPROC(resume_kernel)
15094 #endif
15095 CFI_ENDPROC
15096 /*
15097@@ -397,23 +547,34 @@ sysenter_past_esp:
15098 /*CFI_REL_OFFSET cs, 0*/
15099 /*
15100 * Push current_thread_info()->sysenter_return to the stack.
15101- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15102- * pushed above; +8 corresponds to copy_thread's esp0 setting.
15103 */
15104- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
15105+ pushl_cfi $0
15106 CFI_REL_OFFSET eip, 0
15107
15108 pushl_cfi %eax
15109 SAVE_ALL
15110+ GET_THREAD_INFO(%ebp)
15111+ movl TI_sysenter_return(%ebp),%ebp
15112+ movl %ebp,PT_EIP(%esp)
15113 ENABLE_INTERRUPTS(CLBR_NONE)
15114
15115 /*
15116 * Load the potential sixth argument from user stack.
15117 * Careful about security.
15118 */
15119+ movl PT_OLDESP(%esp),%ebp
15120+
15121+#ifdef CONFIG_PAX_MEMORY_UDEREF
15122+ mov PT_OLDSS(%esp),%ds
15123+1: movl %ds:(%ebp),%ebp
15124+ push %ss
15125+ pop %ds
15126+#else
15127 cmpl $__PAGE_OFFSET-3,%ebp
15128 jae syscall_fault
15129 1: movl (%ebp),%ebp
15130+#endif
15131+
15132 movl %ebp,PT_EBP(%esp)
15133 .section __ex_table,"a"
15134 .align 4
15135@@ -436,12 +597,24 @@ sysenter_do_call:
15136 testl $_TIF_ALLWORK_MASK, %ecx
15137 jne sysexit_audit
15138 sysenter_exit:
15139+
15140+#ifdef CONFIG_PAX_RANDKSTACK
15141+ pushl_cfi %eax
15142+ movl %esp, %eax
15143+ call pax_randomize_kstack
15144+ popl_cfi %eax
15145+#endif
15146+
15147+ pax_erase_kstack
15148+
15149 /* if something modifies registers it must also disable sysexit */
15150 movl PT_EIP(%esp), %edx
15151 movl PT_OLDESP(%esp), %ecx
15152 xorl %ebp,%ebp
15153 TRACE_IRQS_ON
15154 1: mov PT_FS(%esp), %fs
15155+2: mov PT_DS(%esp), %ds
15156+3: mov PT_ES(%esp), %es
15157 PTGS_TO_GS
15158 ENABLE_INTERRUPTS_SYSEXIT
15159
15160@@ -458,6 +631,9 @@ sysenter_audit:
15161 movl %eax,%edx /* 2nd arg: syscall number */
15162 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15163 call __audit_syscall_entry
15164+
15165+ pax_erase_kstack
15166+
15167 pushl_cfi %ebx
15168 movl PT_EAX(%esp),%eax /* reload syscall number */
15169 jmp sysenter_do_call
15170@@ -483,11 +659,17 @@ sysexit_audit:
15171
15172 CFI_ENDPROC
15173 .pushsection .fixup,"ax"
15174-2: movl $0,PT_FS(%esp)
15175+4: movl $0,PT_FS(%esp)
15176+ jmp 1b
15177+5: movl $0,PT_DS(%esp)
15178+ jmp 1b
15179+6: movl $0,PT_ES(%esp)
15180 jmp 1b
15181 .section __ex_table,"a"
15182 .align 4
15183- .long 1b,2b
15184+ .long 1b,4b
15185+ .long 2b,5b
15186+ .long 3b,6b
15187 .popsection
15188 PTGS_TO_GS_EX
15189 ENDPROC(ia32_sysenter_target)
15190@@ -520,6 +702,15 @@ syscall_exit:
15191 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15192 jne syscall_exit_work
15193
15194+restore_all_pax:
15195+
15196+#ifdef CONFIG_PAX_RANDKSTACK
15197+ movl %esp, %eax
15198+ call pax_randomize_kstack
15199+#endif
15200+
15201+ pax_erase_kstack
15202+
15203 restore_all:
15204 TRACE_IRQS_IRET
15205 restore_all_notrace:
15206@@ -579,14 +770,34 @@ ldt_ss:
15207 * compensating for the offset by changing to the ESPFIX segment with
15208 * a base address that matches for the difference.
15209 */
15210-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
15211+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
15212 mov %esp, %edx /* load kernel esp */
15213 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15214 mov %dx, %ax /* eax: new kernel esp */
15215 sub %eax, %edx /* offset (low word is 0) */
15216+#ifdef CONFIG_SMP
15217+ movl PER_CPU_VAR(cpu_number), %ebx
15218+ shll $PAGE_SHIFT_asm, %ebx
15219+ addl $cpu_gdt_table, %ebx
15220+#else
15221+ movl $cpu_gdt_table, %ebx
15222+#endif
15223 shr $16, %edx
15224- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
15225- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
15226+
15227+#ifdef CONFIG_PAX_KERNEXEC
15228+ mov %cr0, %esi
15229+ btr $16, %esi
15230+ mov %esi, %cr0
15231+#endif
15232+
15233+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
15234+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
15235+
15236+#ifdef CONFIG_PAX_KERNEXEC
15237+ bts $16, %esi
15238+ mov %esi, %cr0
15239+#endif
15240+
15241 pushl_cfi $__ESPFIX_SS
15242 pushl_cfi %eax /* new kernel esp */
15243 /* Disable interrupts, but do not irqtrace this section: we
15244@@ -615,38 +826,30 @@ work_resched:
15245 movl TI_flags(%ebp), %ecx
15246 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15247 # than syscall tracing?
15248- jz restore_all
15249+ jz restore_all_pax
15250 testb $_TIF_NEED_RESCHED, %cl
15251 jnz work_resched
15252
15253 work_notifysig: # deal with pending signals and
15254 # notify-resume requests
15255+ movl %esp, %eax
15256 #ifdef CONFIG_VM86
15257 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15258- movl %esp, %eax
15259- jne work_notifysig_v86 # returning to kernel-space or
15260+ jz 1f # returning to kernel-space or
15261 # vm86-space
15262- TRACE_IRQS_ON
15263- ENABLE_INTERRUPTS(CLBR_NONE)
15264- xorl %edx, %edx
15265- call do_notify_resume
15266- jmp resume_userspace_sig
15267
15268- ALIGN
15269-work_notifysig_v86:
15270 pushl_cfi %ecx # save ti_flags for do_notify_resume
15271 call save_v86_state # %eax contains pt_regs pointer
15272 popl_cfi %ecx
15273 movl %eax, %esp
15274-#else
15275- movl %esp, %eax
15276+1:
15277 #endif
15278 TRACE_IRQS_ON
15279 ENABLE_INTERRUPTS(CLBR_NONE)
15280 xorl %edx, %edx
15281 call do_notify_resume
15282 jmp resume_userspace_sig
15283-END(work_pending)
15284+ENDPROC(work_pending)
15285
15286 # perform syscall exit tracing
15287 ALIGN
15288@@ -654,11 +857,14 @@ syscall_trace_entry:
15289 movl $-ENOSYS,PT_EAX(%esp)
15290 movl %esp, %eax
15291 call syscall_trace_enter
15292+
15293+ pax_erase_kstack
15294+
15295 /* What it returned is what we'll actually use. */
15296 cmpl $(NR_syscalls), %eax
15297 jnae syscall_call
15298 jmp syscall_exit
15299-END(syscall_trace_entry)
15300+ENDPROC(syscall_trace_entry)
15301
15302 # perform syscall exit tracing
15303 ALIGN
15304@@ -671,20 +877,24 @@ syscall_exit_work:
15305 movl %esp, %eax
15306 call syscall_trace_leave
15307 jmp resume_userspace
15308-END(syscall_exit_work)
15309+ENDPROC(syscall_exit_work)
15310 CFI_ENDPROC
15311
15312 RING0_INT_FRAME # can't unwind into user space anyway
15313 syscall_fault:
15314+#ifdef CONFIG_PAX_MEMORY_UDEREF
15315+ push %ss
15316+ pop %ds
15317+#endif
15318 GET_THREAD_INFO(%ebp)
15319 movl $-EFAULT,PT_EAX(%esp)
15320 jmp resume_userspace
15321-END(syscall_fault)
15322+ENDPROC(syscall_fault)
15323
15324 syscall_badsys:
15325 movl $-ENOSYS,PT_EAX(%esp)
15326 jmp resume_userspace
15327-END(syscall_badsys)
15328+ENDPROC(syscall_badsys)
15329 CFI_ENDPROC
15330 /*
15331 * End of kprobes section
15332@@ -756,6 +966,36 @@ ENTRY(ptregs_clone)
15333 CFI_ENDPROC
15334 ENDPROC(ptregs_clone)
15335
15336+ ALIGN;
15337+ENTRY(kernel_execve)
15338+ CFI_STARTPROC
15339+ pushl_cfi %ebp
15340+ sub $PT_OLDSS+4,%esp
15341+ pushl_cfi %edi
15342+ pushl_cfi %ecx
15343+ pushl_cfi %eax
15344+ lea 3*4(%esp),%edi
15345+ mov $PT_OLDSS/4+1,%ecx
15346+ xorl %eax,%eax
15347+ rep stosl
15348+ popl_cfi %eax
15349+ popl_cfi %ecx
15350+ popl_cfi %edi
15351+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15352+ pushl_cfi %esp
15353+ call sys_execve
15354+ add $4,%esp
15355+ CFI_ADJUST_CFA_OFFSET -4
15356+ GET_THREAD_INFO(%ebp)
15357+ test %eax,%eax
15358+ jz syscall_exit
15359+ add $PT_OLDSS+4,%esp
15360+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
15361+ popl_cfi %ebp
15362+ ret
15363+ CFI_ENDPROC
15364+ENDPROC(kernel_execve)
15365+
15366 .macro FIXUP_ESPFIX_STACK
15367 /*
15368 * Switch back for ESPFIX stack to the normal zerobased stack
15369@@ -765,8 +1005,15 @@ ENDPROC(ptregs_clone)
15370 * normal stack and adjusts ESP with the matching offset.
15371 */
15372 /* fixup the stack */
15373- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
15374- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
15375+#ifdef CONFIG_SMP
15376+ movl PER_CPU_VAR(cpu_number), %ebx
15377+ shll $PAGE_SHIFT_asm, %ebx
15378+ addl $cpu_gdt_table, %ebx
15379+#else
15380+ movl $cpu_gdt_table, %ebx
15381+#endif
15382+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
15383+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
15384 shl $16, %eax
15385 addl %esp, %eax /* the adjusted stack pointer */
15386 pushl_cfi $__KERNEL_DS
15387@@ -819,7 +1066,7 @@ vector=vector+1
15388 .endr
15389 2: jmp common_interrupt
15390 .endr
15391-END(irq_entries_start)
15392+ENDPROC(irq_entries_start)
15393
15394 .previous
15395 END(interrupt)
15396@@ -867,7 +1114,7 @@ ENTRY(coprocessor_error)
15397 pushl_cfi $do_coprocessor_error
15398 jmp error_code
15399 CFI_ENDPROC
15400-END(coprocessor_error)
15401+ENDPROC(coprocessor_error)
15402
15403 ENTRY(simd_coprocessor_error)
15404 RING0_INT_FRAME
15405@@ -888,7 +1135,7 @@ ENTRY(simd_coprocessor_error)
15406 #endif
15407 jmp error_code
15408 CFI_ENDPROC
15409-END(simd_coprocessor_error)
15410+ENDPROC(simd_coprocessor_error)
15411
15412 ENTRY(device_not_available)
15413 RING0_INT_FRAME
15414@@ -896,7 +1143,7 @@ ENTRY(device_not_available)
15415 pushl_cfi $do_device_not_available
15416 jmp error_code
15417 CFI_ENDPROC
15418-END(device_not_available)
15419+ENDPROC(device_not_available)
15420
15421 #ifdef CONFIG_PARAVIRT
15422 ENTRY(native_iret)
15423@@ -905,12 +1152,12 @@ ENTRY(native_iret)
15424 .align 4
15425 .long native_iret, iret_exc
15426 .previous
15427-END(native_iret)
15428+ENDPROC(native_iret)
15429
15430 ENTRY(native_irq_enable_sysexit)
15431 sti
15432 sysexit
15433-END(native_irq_enable_sysexit)
15434+ENDPROC(native_irq_enable_sysexit)
15435 #endif
15436
15437 ENTRY(overflow)
15438@@ -919,7 +1166,7 @@ ENTRY(overflow)
15439 pushl_cfi $do_overflow
15440 jmp error_code
15441 CFI_ENDPROC
15442-END(overflow)
15443+ENDPROC(overflow)
15444
15445 ENTRY(bounds)
15446 RING0_INT_FRAME
15447@@ -927,7 +1174,7 @@ ENTRY(bounds)
15448 pushl_cfi $do_bounds
15449 jmp error_code
15450 CFI_ENDPROC
15451-END(bounds)
15452+ENDPROC(bounds)
15453
15454 ENTRY(invalid_op)
15455 RING0_INT_FRAME
15456@@ -935,7 +1182,7 @@ ENTRY(invalid_op)
15457 pushl_cfi $do_invalid_op
15458 jmp error_code
15459 CFI_ENDPROC
15460-END(invalid_op)
15461+ENDPROC(invalid_op)
15462
15463 ENTRY(coprocessor_segment_overrun)
15464 RING0_INT_FRAME
15465@@ -943,35 +1190,35 @@ ENTRY(coprocessor_segment_overrun)
15466 pushl_cfi $do_coprocessor_segment_overrun
15467 jmp error_code
15468 CFI_ENDPROC
15469-END(coprocessor_segment_overrun)
15470+ENDPROC(coprocessor_segment_overrun)
15471
15472 ENTRY(invalid_TSS)
15473 RING0_EC_FRAME
15474 pushl_cfi $do_invalid_TSS
15475 jmp error_code
15476 CFI_ENDPROC
15477-END(invalid_TSS)
15478+ENDPROC(invalid_TSS)
15479
15480 ENTRY(segment_not_present)
15481 RING0_EC_FRAME
15482 pushl_cfi $do_segment_not_present
15483 jmp error_code
15484 CFI_ENDPROC
15485-END(segment_not_present)
15486+ENDPROC(segment_not_present)
15487
15488 ENTRY(stack_segment)
15489 RING0_EC_FRAME
15490 pushl_cfi $do_stack_segment
15491 jmp error_code
15492 CFI_ENDPROC
15493-END(stack_segment)
15494+ENDPROC(stack_segment)
15495
15496 ENTRY(alignment_check)
15497 RING0_EC_FRAME
15498 pushl_cfi $do_alignment_check
15499 jmp error_code
15500 CFI_ENDPROC
15501-END(alignment_check)
15502+ENDPROC(alignment_check)
15503
15504 ENTRY(divide_error)
15505 RING0_INT_FRAME
15506@@ -979,7 +1226,7 @@ ENTRY(divide_error)
15507 pushl_cfi $do_divide_error
15508 jmp error_code
15509 CFI_ENDPROC
15510-END(divide_error)
15511+ENDPROC(divide_error)
15512
15513 #ifdef CONFIG_X86_MCE
15514 ENTRY(machine_check)
15515@@ -988,7 +1235,7 @@ ENTRY(machine_check)
15516 pushl_cfi machine_check_vector
15517 jmp error_code
15518 CFI_ENDPROC
15519-END(machine_check)
15520+ENDPROC(machine_check)
15521 #endif
15522
15523 ENTRY(spurious_interrupt_bug)
15524@@ -997,7 +1244,7 @@ ENTRY(spurious_interrupt_bug)
15525 pushl_cfi $do_spurious_interrupt_bug
15526 jmp error_code
15527 CFI_ENDPROC
15528-END(spurious_interrupt_bug)
15529+ENDPROC(spurious_interrupt_bug)
15530 /*
15531 * End of kprobes section
15532 */
15533@@ -1112,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
15534
15535 ENTRY(mcount)
15536 ret
15537-END(mcount)
15538+ENDPROC(mcount)
15539
15540 ENTRY(ftrace_caller)
15541 cmpl $0, function_trace_stop
15542@@ -1141,7 +1388,7 @@ ftrace_graph_call:
15543 .globl ftrace_stub
15544 ftrace_stub:
15545 ret
15546-END(ftrace_caller)
15547+ENDPROC(ftrace_caller)
15548
15549 #else /* ! CONFIG_DYNAMIC_FTRACE */
15550
15551@@ -1177,7 +1424,7 @@ trace:
15552 popl %ecx
15553 popl %eax
15554 jmp ftrace_stub
15555-END(mcount)
15556+ENDPROC(mcount)
15557 #endif /* CONFIG_DYNAMIC_FTRACE */
15558 #endif /* CONFIG_FUNCTION_TRACER */
15559
15560@@ -1198,7 +1445,7 @@ ENTRY(ftrace_graph_caller)
15561 popl %ecx
15562 popl %eax
15563 ret
15564-END(ftrace_graph_caller)
15565+ENDPROC(ftrace_graph_caller)
15566
15567 .globl return_to_handler
15568 return_to_handler:
15569@@ -1253,15 +1500,18 @@ error_code:
15570 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15571 REG_TO_PTGS %ecx
15572 SET_KERNEL_GS %ecx
15573- movl $(__USER_DS), %ecx
15574+ movl $(__KERNEL_DS), %ecx
15575 movl %ecx, %ds
15576 movl %ecx, %es
15577+
15578+ pax_enter_kernel
15579+
15580 TRACE_IRQS_OFF
15581 movl %esp,%eax # pt_regs pointer
15582 call *%edi
15583 jmp ret_from_exception
15584 CFI_ENDPROC
15585-END(page_fault)
15586+ENDPROC(page_fault)
15587
15588 /*
15589 * Debug traps and NMI can happen at the one SYSENTER instruction
15590@@ -1303,7 +1553,7 @@ debug_stack_correct:
15591 call do_debug
15592 jmp ret_from_exception
15593 CFI_ENDPROC
15594-END(debug)
15595+ENDPROC(debug)
15596
15597 /*
15598 * NMI is doubly nasty. It can happen _while_ we're handling
15599@@ -1340,6 +1590,9 @@ nmi_stack_correct:
15600 xorl %edx,%edx # zero error code
15601 movl %esp,%eax # pt_regs pointer
15602 call do_nmi
15603+
15604+ pax_exit_kernel
15605+
15606 jmp restore_all_notrace
15607 CFI_ENDPROC
15608
15609@@ -1376,12 +1629,15 @@ nmi_espfix_stack:
15610 FIXUP_ESPFIX_STACK # %eax == %esp
15611 xorl %edx,%edx # zero error code
15612 call do_nmi
15613+
15614+ pax_exit_kernel
15615+
15616 RESTORE_REGS
15617 lss 12+4(%esp), %esp # back to espfix stack
15618 CFI_ADJUST_CFA_OFFSET -24
15619 jmp irq_return
15620 CFI_ENDPROC
15621-END(nmi)
15622+ENDPROC(nmi)
15623
15624 ENTRY(int3)
15625 RING0_INT_FRAME
15626@@ -1393,14 +1649,14 @@ ENTRY(int3)
15627 call do_int3
15628 jmp ret_from_exception
15629 CFI_ENDPROC
15630-END(int3)
15631+ENDPROC(int3)
15632
15633 ENTRY(general_protection)
15634 RING0_EC_FRAME
15635 pushl_cfi $do_general_protection
15636 jmp error_code
15637 CFI_ENDPROC
15638-END(general_protection)
15639+ENDPROC(general_protection)
15640
15641 #ifdef CONFIG_KVM_GUEST
15642 ENTRY(async_page_fault)
15643@@ -1408,7 +1664,7 @@ ENTRY(async_page_fault)
15644 pushl_cfi $do_async_page_fault
15645 jmp error_code
15646 CFI_ENDPROC
15647-END(async_page_fault)
15648+ENDPROC(async_page_fault)
15649 #endif
15650
15651 /*
15652diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15653index cdc79b5..4710a75 100644
15654--- a/arch/x86/kernel/entry_64.S
15655+++ b/arch/x86/kernel/entry_64.S
15656@@ -56,6 +56,8 @@
15657 #include <asm/ftrace.h>
15658 #include <asm/percpu.h>
15659 #include <linux/err.h>
15660+#include <asm/pgtable.h>
15661+#include <asm/alternative-asm.h>
15662
15663 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15664 #include <linux/elf-em.h>
15665@@ -69,8 +71,9 @@
15666 #ifdef CONFIG_FUNCTION_TRACER
15667 #ifdef CONFIG_DYNAMIC_FTRACE
15668 ENTRY(mcount)
15669+ pax_force_retaddr
15670 retq
15671-END(mcount)
15672+ENDPROC(mcount)
15673
15674 ENTRY(ftrace_caller)
15675 cmpl $0, function_trace_stop
15676@@ -93,8 +96,9 @@ GLOBAL(ftrace_graph_call)
15677 #endif
15678
15679 GLOBAL(ftrace_stub)
15680+ pax_force_retaddr
15681 retq
15682-END(ftrace_caller)
15683+ENDPROC(ftrace_caller)
15684
15685 #else /* ! CONFIG_DYNAMIC_FTRACE */
15686 ENTRY(mcount)
15687@@ -113,6 +117,7 @@ ENTRY(mcount)
15688 #endif
15689
15690 GLOBAL(ftrace_stub)
15691+ pax_force_retaddr
15692 retq
15693
15694 trace:
15695@@ -122,12 +127,13 @@ trace:
15696 movq 8(%rbp), %rsi
15697 subq $MCOUNT_INSN_SIZE, %rdi
15698
15699+ pax_force_fptr ftrace_trace_function
15700 call *ftrace_trace_function
15701
15702 MCOUNT_RESTORE_FRAME
15703
15704 jmp ftrace_stub
15705-END(mcount)
15706+ENDPROC(mcount)
15707 #endif /* CONFIG_DYNAMIC_FTRACE */
15708 #endif /* CONFIG_FUNCTION_TRACER */
15709
15710@@ -147,8 +153,9 @@ ENTRY(ftrace_graph_caller)
15711
15712 MCOUNT_RESTORE_FRAME
15713
15714+ pax_force_retaddr
15715 retq
15716-END(ftrace_graph_caller)
15717+ENDPROC(ftrace_graph_caller)
15718
15719 GLOBAL(return_to_handler)
15720 subq $24, %rsp
15721@@ -164,6 +171,7 @@ GLOBAL(return_to_handler)
15722 movq 8(%rsp), %rdx
15723 movq (%rsp), %rax
15724 addq $24, %rsp
15725+ pax_force_fptr %rdi
15726 jmp *%rdi
15727 #endif
15728
15729@@ -179,6 +187,282 @@ ENTRY(native_usergs_sysret64)
15730 ENDPROC(native_usergs_sysret64)
15731 #endif /* CONFIG_PARAVIRT */
15732
15733+ .macro ljmpq sel, off
15734+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15735+ .byte 0x48; ljmp *1234f(%rip)
15736+ .pushsection .rodata
15737+ .align 16
15738+ 1234: .quad \off; .word \sel
15739+ .popsection
15740+#else
15741+ pushq $\sel
15742+ pushq $\off
15743+ lretq
15744+#endif
15745+ .endm
15746+
15747+ .macro pax_enter_kernel
15748+ pax_set_fptr_mask
15749+#ifdef CONFIG_PAX_KERNEXEC
15750+ call pax_enter_kernel
15751+#endif
15752+ .endm
15753+
15754+ .macro pax_exit_kernel
15755+#ifdef CONFIG_PAX_KERNEXEC
15756+ call pax_exit_kernel
15757+#endif
15758+ .endm
15759+
15760+#ifdef CONFIG_PAX_KERNEXEC
15761+ENTRY(pax_enter_kernel)
15762+ pushq %rdi
15763+
15764+#ifdef CONFIG_PARAVIRT
15765+ PV_SAVE_REGS(CLBR_RDI)
15766+#endif
15767+
15768+ GET_CR0_INTO_RDI
15769+ bts $16,%rdi
15770+ jnc 3f
15771+ mov %cs,%edi
15772+ cmp $__KERNEL_CS,%edi
15773+ jnz 2f
15774+1:
15775+
15776+#ifdef CONFIG_PARAVIRT
15777+ PV_RESTORE_REGS(CLBR_RDI)
15778+#endif
15779+
15780+ popq %rdi
15781+ pax_force_retaddr
15782+ retq
15783+
15784+2: ljmpq __KERNEL_CS,1f
15785+3: ljmpq __KERNEXEC_KERNEL_CS,4f
15786+4: SET_RDI_INTO_CR0
15787+ jmp 1b
15788+ENDPROC(pax_enter_kernel)
15789+
15790+ENTRY(pax_exit_kernel)
15791+ pushq %rdi
15792+
15793+#ifdef CONFIG_PARAVIRT
15794+ PV_SAVE_REGS(CLBR_RDI)
15795+#endif
15796+
15797+ mov %cs,%rdi
15798+ cmp $__KERNEXEC_KERNEL_CS,%edi
15799+ jz 2f
15800+1:
15801+
15802+#ifdef CONFIG_PARAVIRT
15803+ PV_RESTORE_REGS(CLBR_RDI);
15804+#endif
15805+
15806+ popq %rdi
15807+ pax_force_retaddr
15808+ retq
15809+
15810+2: GET_CR0_INTO_RDI
15811+ btr $16,%rdi
15812+ ljmpq __KERNEL_CS,3f
15813+3: SET_RDI_INTO_CR0
15814+ jmp 1b
15815+#ifdef CONFIG_PARAVIRT
15816+ PV_RESTORE_REGS(CLBR_RDI);
15817+#endif
15818+
15819+ popq %rdi
15820+ pax_force_retaddr
15821+ retq
15822+ENDPROC(pax_exit_kernel)
15823+#endif
15824+
15825+ .macro pax_enter_kernel_user
15826+ pax_set_fptr_mask
15827+#ifdef CONFIG_PAX_MEMORY_UDEREF
15828+ call pax_enter_kernel_user
15829+#endif
15830+ .endm
15831+
15832+ .macro pax_exit_kernel_user
15833+#ifdef CONFIG_PAX_MEMORY_UDEREF
15834+ call pax_exit_kernel_user
15835+#endif
15836+#ifdef CONFIG_PAX_RANDKSTACK
15837+ pushq %rax
15838+ call pax_randomize_kstack
15839+ popq %rax
15840+#endif
15841+ .endm
15842+
15843+#ifdef CONFIG_PAX_MEMORY_UDEREF
15844+ENTRY(pax_enter_kernel_user)
15845+ pushq %rdi
15846+ pushq %rbx
15847+
15848+#ifdef CONFIG_PARAVIRT
15849+ PV_SAVE_REGS(CLBR_RDI)
15850+#endif
15851+
15852+ GET_CR3_INTO_RDI
15853+ mov %rdi,%rbx
15854+ add $__START_KERNEL_map,%rbx
15855+ sub phys_base(%rip),%rbx
15856+
15857+#ifdef CONFIG_PARAVIRT
15858+ pushq %rdi
15859+ cmpl $0, pv_info+PARAVIRT_enabled
15860+ jz 1f
15861+ i = 0
15862+ .rept USER_PGD_PTRS
15863+ mov i*8(%rbx),%rsi
15864+ mov $0,%sil
15865+ lea i*8(%rbx),%rdi
15866+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15867+ i = i + 1
15868+ .endr
15869+ jmp 2f
15870+1:
15871+#endif
15872+
15873+ i = 0
15874+ .rept USER_PGD_PTRS
15875+ movb $0,i*8(%rbx)
15876+ i = i + 1
15877+ .endr
15878+
15879+#ifdef CONFIG_PARAVIRT
15880+2: popq %rdi
15881+#endif
15882+ SET_RDI_INTO_CR3
15883+
15884+#ifdef CONFIG_PAX_KERNEXEC
15885+ GET_CR0_INTO_RDI
15886+ bts $16,%rdi
15887+ SET_RDI_INTO_CR0
15888+#endif
15889+
15890+#ifdef CONFIG_PARAVIRT
15891+ PV_RESTORE_REGS(CLBR_RDI)
15892+#endif
15893+
15894+ popq %rbx
15895+ popq %rdi
15896+ pax_force_retaddr
15897+ retq
15898+ENDPROC(pax_enter_kernel_user)
15899+
15900+ENTRY(pax_exit_kernel_user)
15901+ push %rdi
15902+
15903+#ifdef CONFIG_PARAVIRT
15904+ pushq %rbx
15905+ PV_SAVE_REGS(CLBR_RDI)
15906+#endif
15907+
15908+#ifdef CONFIG_PAX_KERNEXEC
15909+ GET_CR0_INTO_RDI
15910+ btr $16,%rdi
15911+ SET_RDI_INTO_CR0
15912+#endif
15913+
15914+ GET_CR3_INTO_RDI
15915+ add $__START_KERNEL_map,%rdi
15916+ sub phys_base(%rip),%rdi
15917+
15918+#ifdef CONFIG_PARAVIRT
15919+ cmpl $0, pv_info+PARAVIRT_enabled
15920+ jz 1f
15921+ mov %rdi,%rbx
15922+ i = 0
15923+ .rept USER_PGD_PTRS
15924+ mov i*8(%rbx),%rsi
15925+ mov $0x67,%sil
15926+ lea i*8(%rbx),%rdi
15927+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15928+ i = i + 1
15929+ .endr
15930+ jmp 2f
15931+1:
15932+#endif
15933+
15934+ i = 0
15935+ .rept USER_PGD_PTRS
15936+ movb $0x67,i*8(%rdi)
15937+ i = i + 1
15938+ .endr
15939+
15940+#ifdef CONFIG_PARAVIRT
15941+2: PV_RESTORE_REGS(CLBR_RDI)
15942+ popq %rbx
15943+#endif
15944+
15945+ popq %rdi
15946+ pax_force_retaddr
15947+ retq
15948+ENDPROC(pax_exit_kernel_user)
15949+#endif
15950+
15951+.macro pax_erase_kstack
15952+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15953+ call pax_erase_kstack
15954+#endif
15955+.endm
15956+
15957+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15958+/*
15959+ * r11: thread_info
15960+ * rcx, rdx: can be clobbered
15961+ */
15962+ENTRY(pax_erase_kstack)
15963+ pushq %rdi
15964+ pushq %rax
15965+ pushq %r11
15966+
15967+ GET_THREAD_INFO(%r11)
15968+ mov TI_lowest_stack(%r11), %rdi
15969+ mov $-0xBEEF, %rax
15970+ std
15971+
15972+1: mov %edi, %ecx
15973+ and $THREAD_SIZE_asm - 1, %ecx
15974+ shr $3, %ecx
15975+ repne scasq
15976+ jecxz 2f
15977+
15978+ cmp $2*8, %ecx
15979+ jc 2f
15980+
15981+ mov $2*8, %ecx
15982+ repe scasq
15983+ jecxz 2f
15984+ jne 1b
15985+
15986+2: cld
15987+ mov %esp, %ecx
15988+ sub %edi, %ecx
15989+
15990+ cmp $THREAD_SIZE_asm, %rcx
15991+ jb 3f
15992+ ud2
15993+3:
15994+
15995+ shr $3, %ecx
15996+ rep stosq
15997+
15998+ mov TI_task_thread_sp0(%r11), %rdi
15999+ sub $256, %rdi
16000+ mov %rdi, TI_lowest_stack(%r11)
16001+
16002+ popq %r11
16003+ popq %rax
16004+ popq %rdi
16005+ pax_force_retaddr
16006+ ret
16007+ENDPROC(pax_erase_kstack)
16008+#endif
16009
16010 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
16011 #ifdef CONFIG_TRACE_IRQFLAGS
16012@@ -232,8 +516,8 @@ ENDPROC(native_usergs_sysret64)
16013 .endm
16014
16015 .macro UNFAKE_STACK_FRAME
16016- addq $8*6, %rsp
16017- CFI_ADJUST_CFA_OFFSET -(6*8)
16018+ addq $8*6 + ARG_SKIP, %rsp
16019+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16020 .endm
16021
16022 /*
16023@@ -320,7 +604,7 @@ ENDPROC(native_usergs_sysret64)
16024 movq %rsp, %rsi
16025
16026 leaq -RBP(%rsp),%rdi /* arg1 for handler */
16027- testl $3, CS-RBP(%rsi)
16028+ testb $3, CS-RBP(%rsi)
16029 je 1f
16030 SWAPGS
16031 /*
16032@@ -355,9 +639,10 @@ ENTRY(save_rest)
16033 movq_cfi r15, R15+16
16034 movq %r11, 8(%rsp) /* return address */
16035 FIXUP_TOP_OF_STACK %r11, 16
16036+ pax_force_retaddr
16037 ret
16038 CFI_ENDPROC
16039-END(save_rest)
16040+ENDPROC(save_rest)
16041
16042 /* save complete stack frame */
16043 .pushsection .kprobes.text, "ax"
16044@@ -386,9 +671,10 @@ ENTRY(save_paranoid)
16045 js 1f /* negative -> in kernel */
16046 SWAPGS
16047 xorl %ebx,%ebx
16048-1: ret
16049+1: pax_force_retaddr_bts
16050+ ret
16051 CFI_ENDPROC
16052-END(save_paranoid)
16053+ENDPROC(save_paranoid)
16054 .popsection
16055
16056 /*
16057@@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
16058
16059 RESTORE_REST
16060
16061- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16062+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16063 jz retint_restore_args
16064
16065 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16066@@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
16067 jmp ret_from_sys_call # go to the SYSRET fastpath
16068
16069 CFI_ENDPROC
16070-END(ret_from_fork)
16071+ENDPROC(ret_from_fork)
16072
16073 /*
16074 * System call entry. Up to 6 arguments in registers are supported.
16075@@ -456,7 +742,7 @@ END(ret_from_fork)
16076 ENTRY(system_call)
16077 CFI_STARTPROC simple
16078 CFI_SIGNAL_FRAME
16079- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16080+ CFI_DEF_CFA rsp,0
16081 CFI_REGISTER rip,rcx
16082 /*CFI_REGISTER rflags,r11*/
16083 SWAPGS_UNSAFE_STACK
16084@@ -469,16 +755,18 @@ GLOBAL(system_call_after_swapgs)
16085
16086 movq %rsp,PER_CPU_VAR(old_rsp)
16087 movq PER_CPU_VAR(kernel_stack),%rsp
16088+ SAVE_ARGS 8*6,0
16089+ pax_enter_kernel_user
16090 /*
16091 * No need to follow this irqs off/on section - it's straight
16092 * and short:
16093 */
16094 ENABLE_INTERRUPTS(CLBR_NONE)
16095- SAVE_ARGS 8,0
16096 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16097 movq %rcx,RIP-ARGOFFSET(%rsp)
16098 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16099- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16100+ GET_THREAD_INFO(%rcx)
16101+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
16102 jnz tracesys
16103 system_call_fastpath:
16104 #if __SYSCALL_MASK == ~0
16105@@ -488,7 +776,7 @@ system_call_fastpath:
16106 cmpl $__NR_syscall_max,%eax
16107 #endif
16108 ja badsys
16109- movq %r10,%rcx
16110+ movq R10-ARGOFFSET(%rsp),%rcx
16111 call *sys_call_table(,%rax,8) # XXX: rip relative
16112 movq %rax,RAX-ARGOFFSET(%rsp)
16113 /*
16114@@ -502,10 +790,13 @@ sysret_check:
16115 LOCKDEP_SYS_EXIT
16116 DISABLE_INTERRUPTS(CLBR_NONE)
16117 TRACE_IRQS_OFF
16118- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
16119+ GET_THREAD_INFO(%rcx)
16120+ movl TI_flags(%rcx),%edx
16121 andl %edi,%edx
16122 jnz sysret_careful
16123 CFI_REMEMBER_STATE
16124+ pax_exit_kernel_user
16125+ pax_erase_kstack
16126 /*
16127 * sysretq will re-enable interrupts:
16128 */
16129@@ -557,14 +848,18 @@ badsys:
16130 * jump back to the normal fast path.
16131 */
16132 auditsys:
16133- movq %r10,%r9 /* 6th arg: 4th syscall arg */
16134+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16135 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16136 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16137 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16138 movq %rax,%rsi /* 2nd arg: syscall number */
16139 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16140 call __audit_syscall_entry
16141+
16142+ pax_erase_kstack
16143+
16144 LOAD_ARGS 0 /* reload call-clobbered registers */
16145+ pax_set_fptr_mask
16146 jmp system_call_fastpath
16147
16148 /*
16149@@ -585,7 +880,7 @@ sysret_audit:
16150 /* Do syscall tracing */
16151 tracesys:
16152 #ifdef CONFIG_AUDITSYSCALL
16153- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16154+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
16155 jz auditsys
16156 #endif
16157 SAVE_REST
16158@@ -593,12 +888,16 @@ tracesys:
16159 FIXUP_TOP_OF_STACK %rdi
16160 movq %rsp,%rdi
16161 call syscall_trace_enter
16162+
16163+ pax_erase_kstack
16164+
16165 /*
16166 * Reload arg registers from stack in case ptrace changed them.
16167 * We don't reload %rax because syscall_trace_enter() returned
16168 * the value it wants us to use in the table lookup.
16169 */
16170 LOAD_ARGS ARGOFFSET, 1
16171+ pax_set_fptr_mask
16172 RESTORE_REST
16173 #if __SYSCALL_MASK == ~0
16174 cmpq $__NR_syscall_max,%rax
16175@@ -607,7 +906,7 @@ tracesys:
16176 cmpl $__NR_syscall_max,%eax
16177 #endif
16178 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16179- movq %r10,%rcx /* fixup for C */
16180+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16181 call *sys_call_table(,%rax,8)
16182 movq %rax,RAX-ARGOFFSET(%rsp)
16183 /* Use IRET because user could have changed frame */
16184@@ -628,6 +927,7 @@ GLOBAL(int_with_check)
16185 andl %edi,%edx
16186 jnz int_careful
16187 andl $~TS_COMPAT,TI_status(%rcx)
16188+ pax_erase_kstack
16189 jmp retint_swapgs
16190
16191 /* Either reschedule or signal or syscall exit tracking needed. */
16192@@ -674,7 +974,7 @@ int_restore_rest:
16193 TRACE_IRQS_OFF
16194 jmp int_with_check
16195 CFI_ENDPROC
16196-END(system_call)
16197+ENDPROC(system_call)
16198
16199 /*
16200 * Certain special system calls that need to save a complete full stack frame.
16201@@ -690,7 +990,7 @@ ENTRY(\label)
16202 call \func
16203 jmp ptregscall_common
16204 CFI_ENDPROC
16205-END(\label)
16206+ENDPROC(\label)
16207 .endm
16208
16209 PTREGSCALL stub_clone, sys_clone, %r8
16210@@ -708,9 +1008,10 @@ ENTRY(ptregscall_common)
16211 movq_cfi_restore R12+8, r12
16212 movq_cfi_restore RBP+8, rbp
16213 movq_cfi_restore RBX+8, rbx
16214+ pax_force_retaddr
16215 ret $REST_SKIP /* pop extended registers */
16216 CFI_ENDPROC
16217-END(ptregscall_common)
16218+ENDPROC(ptregscall_common)
16219
16220 ENTRY(stub_execve)
16221 CFI_STARTPROC
16222@@ -725,7 +1026,7 @@ ENTRY(stub_execve)
16223 RESTORE_REST
16224 jmp int_ret_from_sys_call
16225 CFI_ENDPROC
16226-END(stub_execve)
16227+ENDPROC(stub_execve)
16228
16229 /*
16230 * sigreturn is special because it needs to restore all registers on return.
16231@@ -743,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
16232 RESTORE_REST
16233 jmp int_ret_from_sys_call
16234 CFI_ENDPROC
16235-END(stub_rt_sigreturn)
16236+ENDPROC(stub_rt_sigreturn)
16237
16238 #ifdef CONFIG_X86_X32_ABI
16239 PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
16240@@ -812,7 +1113,7 @@ vector=vector+1
16241 2: jmp common_interrupt
16242 .endr
16243 CFI_ENDPROC
16244-END(irq_entries_start)
16245+ENDPROC(irq_entries_start)
16246
16247 .previous
16248 END(interrupt)
16249@@ -832,6 +1133,16 @@ END(interrupt)
16250 subq $ORIG_RAX-RBP, %rsp
16251 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
16252 SAVE_ARGS_IRQ
16253+#ifdef CONFIG_PAX_MEMORY_UDEREF
16254+ testb $3, CS(%rdi)
16255+ jnz 1f
16256+ pax_enter_kernel
16257+ jmp 2f
16258+1: pax_enter_kernel_user
16259+2:
16260+#else
16261+ pax_enter_kernel
16262+#endif
16263 call \func
16264 .endm
16265
16266@@ -863,7 +1174,7 @@ ret_from_intr:
16267
16268 exit_intr:
16269 GET_THREAD_INFO(%rcx)
16270- testl $3,CS-ARGOFFSET(%rsp)
16271+ testb $3,CS-ARGOFFSET(%rsp)
16272 je retint_kernel
16273
16274 /* Interrupt came from user space */
16275@@ -885,12 +1196,15 @@ retint_swapgs: /* return to user-space */
16276 * The iretq could re-enable interrupts:
16277 */
16278 DISABLE_INTERRUPTS(CLBR_ANY)
16279+ pax_exit_kernel_user
16280 TRACE_IRQS_IRETQ
16281 SWAPGS
16282 jmp restore_args
16283
16284 retint_restore_args: /* return to kernel space */
16285 DISABLE_INTERRUPTS(CLBR_ANY)
16286+ pax_exit_kernel
16287+ pax_force_retaddr RIP-ARGOFFSET
16288 /*
16289 * The iretq could re-enable interrupts:
16290 */
16291@@ -979,7 +1293,7 @@ ENTRY(retint_kernel)
16292 #endif
16293
16294 CFI_ENDPROC
16295-END(common_interrupt)
16296+ENDPROC(common_interrupt)
16297 /*
16298 * End of kprobes section
16299 */
16300@@ -996,7 +1310,7 @@ ENTRY(\sym)
16301 interrupt \do_sym
16302 jmp ret_from_intr
16303 CFI_ENDPROC
16304-END(\sym)
16305+ENDPROC(\sym)
16306 .endm
16307
16308 #ifdef CONFIG_SMP
16309@@ -1069,12 +1383,22 @@ ENTRY(\sym)
16310 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16311 call error_entry
16312 DEFAULT_FRAME 0
16313+#ifdef CONFIG_PAX_MEMORY_UDEREF
16314+ testb $3, CS(%rsp)
16315+ jnz 1f
16316+ pax_enter_kernel
16317+ jmp 2f
16318+1: pax_enter_kernel_user
16319+2:
16320+#else
16321+ pax_enter_kernel
16322+#endif
16323 movq %rsp,%rdi /* pt_regs pointer */
16324 xorl %esi,%esi /* no error code */
16325 call \do_sym
16326 jmp error_exit /* %ebx: no swapgs flag */
16327 CFI_ENDPROC
16328-END(\sym)
16329+ENDPROC(\sym)
16330 .endm
16331
16332 .macro paranoidzeroentry sym do_sym
16333@@ -1086,15 +1410,25 @@ ENTRY(\sym)
16334 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16335 call save_paranoid
16336 TRACE_IRQS_OFF
16337+#ifdef CONFIG_PAX_MEMORY_UDEREF
16338+ testb $3, CS(%rsp)
16339+ jnz 1f
16340+ pax_enter_kernel
16341+ jmp 2f
16342+1: pax_enter_kernel_user
16343+2:
16344+#else
16345+ pax_enter_kernel
16346+#endif
16347 movq %rsp,%rdi /* pt_regs pointer */
16348 xorl %esi,%esi /* no error code */
16349 call \do_sym
16350 jmp paranoid_exit /* %ebx: no swapgs flag */
16351 CFI_ENDPROC
16352-END(\sym)
16353+ENDPROC(\sym)
16354 .endm
16355
16356-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
16357+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
16358 .macro paranoidzeroentry_ist sym do_sym ist
16359 ENTRY(\sym)
16360 INTR_FRAME
16361@@ -1104,14 +1438,30 @@ ENTRY(\sym)
16362 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16363 call save_paranoid
16364 TRACE_IRQS_OFF
16365+#ifdef CONFIG_PAX_MEMORY_UDEREF
16366+ testb $3, CS(%rsp)
16367+ jnz 1f
16368+ pax_enter_kernel
16369+ jmp 2f
16370+1: pax_enter_kernel_user
16371+2:
16372+#else
16373+ pax_enter_kernel
16374+#endif
16375 movq %rsp,%rdi /* pt_regs pointer */
16376 xorl %esi,%esi /* no error code */
16377+#ifdef CONFIG_SMP
16378+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
16379+ lea init_tss(%r12), %r12
16380+#else
16381+ lea init_tss(%rip), %r12
16382+#endif
16383 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16384 call \do_sym
16385 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16386 jmp paranoid_exit /* %ebx: no swapgs flag */
16387 CFI_ENDPROC
16388-END(\sym)
16389+ENDPROC(\sym)
16390 .endm
16391
16392 .macro errorentry sym do_sym
16393@@ -1122,13 +1472,23 @@ ENTRY(\sym)
16394 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16395 call error_entry
16396 DEFAULT_FRAME 0
16397+#ifdef CONFIG_PAX_MEMORY_UDEREF
16398+ testb $3, CS(%rsp)
16399+ jnz 1f
16400+ pax_enter_kernel
16401+ jmp 2f
16402+1: pax_enter_kernel_user
16403+2:
16404+#else
16405+ pax_enter_kernel
16406+#endif
16407 movq %rsp,%rdi /* pt_regs pointer */
16408 movq ORIG_RAX(%rsp),%rsi /* get error code */
16409 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16410 call \do_sym
16411 jmp error_exit /* %ebx: no swapgs flag */
16412 CFI_ENDPROC
16413-END(\sym)
16414+ENDPROC(\sym)
16415 .endm
16416
16417 /* error code is on the stack already */
16418@@ -1141,13 +1501,23 @@ ENTRY(\sym)
16419 call save_paranoid
16420 DEFAULT_FRAME 0
16421 TRACE_IRQS_OFF
16422+#ifdef CONFIG_PAX_MEMORY_UDEREF
16423+ testb $3, CS(%rsp)
16424+ jnz 1f
16425+ pax_enter_kernel
16426+ jmp 2f
16427+1: pax_enter_kernel_user
16428+2:
16429+#else
16430+ pax_enter_kernel
16431+#endif
16432 movq %rsp,%rdi /* pt_regs pointer */
16433 movq ORIG_RAX(%rsp),%rsi /* get error code */
16434 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16435 call \do_sym
16436 jmp paranoid_exit /* %ebx: no swapgs flag */
16437 CFI_ENDPROC
16438-END(\sym)
16439+ENDPROC(\sym)
16440 .endm
16441
16442 zeroentry divide_error do_divide_error
16443@@ -1177,9 +1547,10 @@ gs_change:
16444 2: mfence /* workaround */
16445 SWAPGS
16446 popfq_cfi
16447+ pax_force_retaddr
16448 ret
16449 CFI_ENDPROC
16450-END(native_load_gs_index)
16451+ENDPROC(native_load_gs_index)
16452
16453 .section __ex_table,"a"
16454 .align 8
16455@@ -1201,13 +1572,14 @@ ENTRY(kernel_thread_helper)
16456 * Here we are in the child and the registers are set as they were
16457 * at kernel_thread() invocation in the parent.
16458 */
16459+ pax_force_fptr %rsi
16460 call *%rsi
16461 # exit
16462 mov %eax, %edi
16463 call do_exit
16464 ud2 # padding for call trace
16465 CFI_ENDPROC
16466-END(kernel_thread_helper)
16467+ENDPROC(kernel_thread_helper)
16468
16469 /*
16470 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16471@@ -1234,11 +1606,11 @@ ENTRY(kernel_execve)
16472 RESTORE_REST
16473 testq %rax,%rax
16474 je int_ret_from_sys_call
16475- RESTORE_ARGS
16476 UNFAKE_STACK_FRAME
16477+ pax_force_retaddr
16478 ret
16479 CFI_ENDPROC
16480-END(kernel_execve)
16481+ENDPROC(kernel_execve)
16482
16483 /* Call softirq on interrupt stack. Interrupts are off. */
16484 ENTRY(call_softirq)
16485@@ -1256,9 +1628,10 @@ ENTRY(call_softirq)
16486 CFI_DEF_CFA_REGISTER rsp
16487 CFI_ADJUST_CFA_OFFSET -8
16488 decl PER_CPU_VAR(irq_count)
16489+ pax_force_retaddr
16490 ret
16491 CFI_ENDPROC
16492-END(call_softirq)
16493+ENDPROC(call_softirq)
16494
16495 #ifdef CONFIG_XEN
16496 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16497@@ -1296,7 +1669,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16498 decl PER_CPU_VAR(irq_count)
16499 jmp error_exit
16500 CFI_ENDPROC
16501-END(xen_do_hypervisor_callback)
16502+ENDPROC(xen_do_hypervisor_callback)
16503
16504 /*
16505 * Hypervisor uses this for application faults while it executes.
16506@@ -1355,7 +1728,7 @@ ENTRY(xen_failsafe_callback)
16507 SAVE_ALL
16508 jmp error_exit
16509 CFI_ENDPROC
16510-END(xen_failsafe_callback)
16511+ENDPROC(xen_failsafe_callback)
16512
16513 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
16514 xen_hvm_callback_vector xen_evtchn_do_upcall
16515@@ -1404,16 +1777,31 @@ ENTRY(paranoid_exit)
16516 TRACE_IRQS_OFF
16517 testl %ebx,%ebx /* swapgs needed? */
16518 jnz paranoid_restore
16519- testl $3,CS(%rsp)
16520+ testb $3,CS(%rsp)
16521 jnz paranoid_userspace
16522+#ifdef CONFIG_PAX_MEMORY_UDEREF
16523+ pax_exit_kernel
16524+ TRACE_IRQS_IRETQ 0
16525+ SWAPGS_UNSAFE_STACK
16526+ RESTORE_ALL 8
16527+ pax_force_retaddr_bts
16528+ jmp irq_return
16529+#endif
16530 paranoid_swapgs:
16531+#ifdef CONFIG_PAX_MEMORY_UDEREF
16532+ pax_exit_kernel_user
16533+#else
16534+ pax_exit_kernel
16535+#endif
16536 TRACE_IRQS_IRETQ 0
16537 SWAPGS_UNSAFE_STACK
16538 RESTORE_ALL 8
16539 jmp irq_return
16540 paranoid_restore:
16541+ pax_exit_kernel
16542 TRACE_IRQS_IRETQ 0
16543 RESTORE_ALL 8
16544+ pax_force_retaddr_bts
16545 jmp irq_return
16546 paranoid_userspace:
16547 GET_THREAD_INFO(%rcx)
16548@@ -1442,7 +1830,7 @@ paranoid_schedule:
16549 TRACE_IRQS_OFF
16550 jmp paranoid_userspace
16551 CFI_ENDPROC
16552-END(paranoid_exit)
16553+ENDPROC(paranoid_exit)
16554
16555 /*
16556 * Exception entry point. This expects an error code/orig_rax on the stack.
16557@@ -1469,12 +1857,13 @@ ENTRY(error_entry)
16558 movq_cfi r14, R14+8
16559 movq_cfi r15, R15+8
16560 xorl %ebx,%ebx
16561- testl $3,CS+8(%rsp)
16562+ testb $3,CS+8(%rsp)
16563 je error_kernelspace
16564 error_swapgs:
16565 SWAPGS
16566 error_sti:
16567 TRACE_IRQS_OFF
16568+ pax_force_retaddr_bts
16569 ret
16570
16571 /*
16572@@ -1501,7 +1890,7 @@ bstep_iret:
16573 movq %rcx,RIP+8(%rsp)
16574 jmp error_swapgs
16575 CFI_ENDPROC
16576-END(error_entry)
16577+ENDPROC(error_entry)
16578
16579
16580 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16581@@ -1521,7 +1910,7 @@ ENTRY(error_exit)
16582 jnz retint_careful
16583 jmp retint_swapgs
16584 CFI_ENDPROC
16585-END(error_exit)
16586+ENDPROC(error_exit)
16587
16588 /*
16589 * Test if a given stack is an NMI stack or not.
16590@@ -1579,9 +1968,11 @@ ENTRY(nmi)
16591 * If %cs was not the kernel segment, then the NMI triggered in user
16592 * space, which means it is definitely not nested.
16593 */
16594+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
16595+ je 1f
16596 cmpl $__KERNEL_CS, 16(%rsp)
16597 jne first_nmi
16598-
16599+1:
16600 /*
16601 * Check the special variable on the stack to see if NMIs are
16602 * executing.
16603@@ -1728,6 +2119,16 @@ end_repeat_nmi:
16604 */
16605 call save_paranoid
16606 DEFAULT_FRAME 0
16607+#ifdef CONFIG_PAX_MEMORY_UDEREF
16608+ testb $3, CS(%rsp)
16609+ jnz 1f
16610+ pax_enter_kernel
16611+ jmp 2f
16612+1: pax_enter_kernel_user
16613+2:
16614+#else
16615+ pax_enter_kernel
16616+#endif
16617 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16618 movq %rsp,%rdi
16619 movq $-1,%rsi
16620@@ -1735,21 +2136,32 @@ end_repeat_nmi:
16621 testl %ebx,%ebx /* swapgs needed? */
16622 jnz nmi_restore
16623 nmi_swapgs:
16624+#ifdef CONFIG_PAX_MEMORY_UDEREF
16625+ pax_exit_kernel_user
16626+#else
16627+ pax_exit_kernel
16628+#endif
16629 SWAPGS_UNSAFE_STACK
16630+ RESTORE_ALL 8
16631+ /* Clear the NMI executing stack variable */
16632+ movq $0, 10*8(%rsp)
16633+ jmp irq_return
16634 nmi_restore:
16635+ pax_exit_kernel
16636 RESTORE_ALL 8
16637+ pax_force_retaddr_bts
16638 /* Clear the NMI executing stack variable */
16639 movq $0, 10*8(%rsp)
16640 jmp irq_return
16641 CFI_ENDPROC
16642-END(nmi)
16643+ENDPROC(nmi)
16644
16645 ENTRY(ignore_sysret)
16646 CFI_STARTPROC
16647 mov $-ENOSYS,%eax
16648 sysret
16649 CFI_ENDPROC
16650-END(ignore_sysret)
16651+ENDPROC(ignore_sysret)
16652
16653 /*
16654 * End of kprobes section
16655diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16656index c9a281f..ce2f317 100644
16657--- a/arch/x86/kernel/ftrace.c
16658+++ b/arch/x86/kernel/ftrace.c
16659@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16660 static const void *mod_code_newcode; /* holds the text to write to the IP */
16661
16662 static unsigned nmi_wait_count;
16663-static atomic_t nmi_update_count = ATOMIC_INIT(0);
16664+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16665
16666 int ftrace_arch_read_dyn_info(char *buf, int size)
16667 {
16668@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16669
16670 r = snprintf(buf, size, "%u %u",
16671 nmi_wait_count,
16672- atomic_read(&nmi_update_count));
16673+ atomic_read_unchecked(&nmi_update_count));
16674 return r;
16675 }
16676
16677@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
16678
16679 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16680 smp_rmb();
16681+ pax_open_kernel();
16682 ftrace_mod_code();
16683- atomic_inc(&nmi_update_count);
16684+ pax_close_kernel();
16685+ atomic_inc_unchecked(&nmi_update_count);
16686 }
16687 /* Must have previous changes seen before executions */
16688 smp_mb();
16689@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
16690 {
16691 unsigned char replaced[MCOUNT_INSN_SIZE];
16692
16693+ ip = ktla_ktva(ip);
16694+
16695 /*
16696 * Note: Due to modules and __init, code can
16697 * disappear and change, we need to protect against faulting
16698@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16699 unsigned char old[MCOUNT_INSN_SIZE], *new;
16700 int ret;
16701
16702- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16703+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16704 new = ftrace_call_replace(ip, (unsigned long)func);
16705 ret = ftrace_modify_code(ip, old, new);
16706
16707@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16708 {
16709 unsigned char code[MCOUNT_INSN_SIZE];
16710
16711+ ip = ktla_ktva(ip);
16712+
16713 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16714 return -EFAULT;
16715
16716diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16717index 51ff186..9e77418 100644
16718--- a/arch/x86/kernel/head32.c
16719+++ b/arch/x86/kernel/head32.c
16720@@ -19,6 +19,7 @@
16721 #include <asm/io_apic.h>
16722 #include <asm/bios_ebda.h>
16723 #include <asm/tlbflush.h>
16724+#include <asm/boot.h>
16725
16726 static void __init i386_default_early_setup(void)
16727 {
16728@@ -31,8 +32,7 @@ static void __init i386_default_early_setup(void)
16729
16730 void __init i386_start_kernel(void)
16731 {
16732- memblock_reserve(__pa_symbol(&_text),
16733- __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
16734+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
16735
16736 #ifdef CONFIG_BLK_DEV_INITRD
16737 /* Reserve INITRD */
16738diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16739index ce0be7c..c41476e 100644
16740--- a/arch/x86/kernel/head_32.S
16741+++ b/arch/x86/kernel/head_32.S
16742@@ -25,6 +25,12 @@
16743 /* Physical address */
16744 #define pa(X) ((X) - __PAGE_OFFSET)
16745
16746+#ifdef CONFIG_PAX_KERNEXEC
16747+#define ta(X) (X)
16748+#else
16749+#define ta(X) ((X) - __PAGE_OFFSET)
16750+#endif
16751+
16752 /*
16753 * References to members of the new_cpu_data structure.
16754 */
16755@@ -54,11 +60,7 @@
16756 * and small than max_low_pfn, otherwise will waste some page table entries
16757 */
16758
16759-#if PTRS_PER_PMD > 1
16760-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16761-#else
16762-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16763-#endif
16764+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16765
16766 /* Number of possible pages in the lowmem region */
16767 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
16768@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
16769 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16770
16771 /*
16772+ * Real beginning of normal "text" segment
16773+ */
16774+ENTRY(stext)
16775+ENTRY(_stext)
16776+
16777+/*
16778 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16779 * %esi points to the real-mode code as a 32-bit pointer.
16780 * CS and DS must be 4 GB flat segments, but we don't depend on
16781@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16782 * can.
16783 */
16784 __HEAD
16785+
16786+#ifdef CONFIG_PAX_KERNEXEC
16787+ jmp startup_32
16788+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16789+.fill PAGE_SIZE-5,1,0xcc
16790+#endif
16791+
16792 ENTRY(startup_32)
16793 movl pa(stack_start),%ecx
16794
16795@@ -105,6 +120,57 @@ ENTRY(startup_32)
16796 2:
16797 leal -__PAGE_OFFSET(%ecx),%esp
16798
16799+#ifdef CONFIG_SMP
16800+ movl $pa(cpu_gdt_table),%edi
16801+ movl $__per_cpu_load,%eax
16802+ movw %ax,__KERNEL_PERCPU + 2(%edi)
16803+ rorl $16,%eax
16804+ movb %al,__KERNEL_PERCPU + 4(%edi)
16805+ movb %ah,__KERNEL_PERCPU + 7(%edi)
16806+ movl $__per_cpu_end - 1,%eax
16807+ subl $__per_cpu_start,%eax
16808+ movw %ax,__KERNEL_PERCPU + 0(%edi)
16809+#endif
16810+
16811+#ifdef CONFIG_PAX_MEMORY_UDEREF
16812+ movl $NR_CPUS,%ecx
16813+ movl $pa(cpu_gdt_table),%edi
16814+1:
16815+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16816+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16817+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16818+ addl $PAGE_SIZE_asm,%edi
16819+ loop 1b
16820+#endif
16821+
16822+#ifdef CONFIG_PAX_KERNEXEC
16823+ movl $pa(boot_gdt),%edi
16824+ movl $__LOAD_PHYSICAL_ADDR,%eax
16825+ movw %ax,__BOOT_CS + 2(%edi)
16826+ rorl $16,%eax
16827+ movb %al,__BOOT_CS + 4(%edi)
16828+ movb %ah,__BOOT_CS + 7(%edi)
16829+ rorl $16,%eax
16830+
16831+ ljmp $(__BOOT_CS),$1f
16832+1:
16833+
16834+ movl $NR_CPUS,%ecx
16835+ movl $pa(cpu_gdt_table),%edi
16836+ addl $__PAGE_OFFSET,%eax
16837+1:
16838+ movw %ax,__KERNEL_CS + 2(%edi)
16839+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16840+ rorl $16,%eax
16841+ movb %al,__KERNEL_CS + 4(%edi)
16842+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16843+ movb %ah,__KERNEL_CS + 7(%edi)
16844+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16845+ rorl $16,%eax
16846+ addl $PAGE_SIZE_asm,%edi
16847+ loop 1b
16848+#endif
16849+
16850 /*
16851 * Clear BSS first so that there are no surprises...
16852 */
16853@@ -195,8 +261,11 @@ ENTRY(startup_32)
16854 movl %eax, pa(max_pfn_mapped)
16855
16856 /* Do early initialization of the fixmap area */
16857- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16858- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
16859+#ifdef CONFIG_COMPAT_VDSO
16860+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
16861+#else
16862+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
16863+#endif
16864 #else /* Not PAE */
16865
16866 page_pde_offset = (__PAGE_OFFSET >> 20);
16867@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16868 movl %eax, pa(max_pfn_mapped)
16869
16870 /* Do early initialization of the fixmap area */
16871- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16872- movl %eax,pa(initial_page_table+0xffc)
16873+#ifdef CONFIG_COMPAT_VDSO
16874+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
16875+#else
16876+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
16877+#endif
16878 #endif
16879
16880 #ifdef CONFIG_PARAVIRT
16881@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16882 cmpl $num_subarch_entries, %eax
16883 jae bad_subarch
16884
16885- movl pa(subarch_entries)(,%eax,4), %eax
16886- subl $__PAGE_OFFSET, %eax
16887- jmp *%eax
16888+ jmp *pa(subarch_entries)(,%eax,4)
16889
16890 bad_subarch:
16891 WEAK(lguest_entry)
16892@@ -255,10 +325,10 @@ WEAK(xen_entry)
16893 __INITDATA
16894
16895 subarch_entries:
16896- .long default_entry /* normal x86/PC */
16897- .long lguest_entry /* lguest hypervisor */
16898- .long xen_entry /* Xen hypervisor */
16899- .long default_entry /* Moorestown MID */
16900+ .long ta(default_entry) /* normal x86/PC */
16901+ .long ta(lguest_entry) /* lguest hypervisor */
16902+ .long ta(xen_entry) /* Xen hypervisor */
16903+ .long ta(default_entry) /* Moorestown MID */
16904 num_subarch_entries = (. - subarch_entries) / 4
16905 .previous
16906 #else
16907@@ -312,6 +382,7 @@ default_entry:
16908 orl %edx,%eax
16909 movl %eax,%cr4
16910
16911+#ifdef CONFIG_X86_PAE
16912 testb $X86_CR4_PAE, %al # check if PAE is enabled
16913 jz 6f
16914
16915@@ -340,6 +411,9 @@ default_entry:
16916 /* Make changes effective */
16917 wrmsr
16918
16919+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16920+#endif
16921+
16922 6:
16923
16924 /*
16925@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
16926 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16927 movl %eax,%ss # after changing gdt.
16928
16929- movl $(__USER_DS),%eax # DS/ES contains default USER segment
16930+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
16931 movl %eax,%ds
16932 movl %eax,%es
16933
16934@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
16935 */
16936 cmpb $0,ready
16937 jne 1f
16938- movl $gdt_page,%eax
16939+ movl $cpu_gdt_table,%eax
16940 movl $stack_canary,%ecx
16941+#ifdef CONFIG_SMP
16942+ addl $__per_cpu_load,%ecx
16943+#endif
16944 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
16945 shrl $16, %ecx
16946 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
16947 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
16948 1:
16949-#endif
16950 movl $(__KERNEL_STACK_CANARY),%eax
16951+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16952+ movl $(__USER_DS),%eax
16953+#else
16954+ xorl %eax,%eax
16955+#endif
16956 movl %eax,%gs
16957
16958 xorl %eax,%eax # Clear LDT
16959@@ -558,22 +639,22 @@ early_page_fault:
16960 jmp early_fault
16961
16962 early_fault:
16963- cld
16964 #ifdef CONFIG_PRINTK
16965+ cmpl $1,%ss:early_recursion_flag
16966+ je hlt_loop
16967+ incl %ss:early_recursion_flag
16968+ cld
16969 pusha
16970 movl $(__KERNEL_DS),%eax
16971 movl %eax,%ds
16972 movl %eax,%es
16973- cmpl $2,early_recursion_flag
16974- je hlt_loop
16975- incl early_recursion_flag
16976 movl %cr2,%eax
16977 pushl %eax
16978 pushl %edx /* trapno */
16979 pushl $fault_msg
16980 call printk
16981+; call dump_stack
16982 #endif
16983- call dump_stack
16984 hlt_loop:
16985 hlt
16986 jmp hlt_loop
16987@@ -581,8 +662,11 @@ hlt_loop:
16988 /* This is the default interrupt "handler" :-) */
16989 ALIGN
16990 ignore_int:
16991- cld
16992 #ifdef CONFIG_PRINTK
16993+ cmpl $2,%ss:early_recursion_flag
16994+ je hlt_loop
16995+ incl %ss:early_recursion_flag
16996+ cld
16997 pushl %eax
16998 pushl %ecx
16999 pushl %edx
17000@@ -591,9 +675,6 @@ ignore_int:
17001 movl $(__KERNEL_DS),%eax
17002 movl %eax,%ds
17003 movl %eax,%es
17004- cmpl $2,early_recursion_flag
17005- je hlt_loop
17006- incl early_recursion_flag
17007 pushl 16(%esp)
17008 pushl 24(%esp)
17009 pushl 32(%esp)
17010@@ -622,29 +703,43 @@ ENTRY(initial_code)
17011 /*
17012 * BSS section
17013 */
17014-__PAGE_ALIGNED_BSS
17015- .align PAGE_SIZE
17016 #ifdef CONFIG_X86_PAE
17017+.section .initial_pg_pmd,"a",@progbits
17018 initial_pg_pmd:
17019 .fill 1024*KPMDS,4,0
17020 #else
17021+.section .initial_page_table,"a",@progbits
17022 ENTRY(initial_page_table)
17023 .fill 1024,4,0
17024 #endif
17025+.section .initial_pg_fixmap,"a",@progbits
17026 initial_pg_fixmap:
17027 .fill 1024,4,0
17028+.section .empty_zero_page,"a",@progbits
17029 ENTRY(empty_zero_page)
17030 .fill 4096,1,0
17031+.section .swapper_pg_dir,"a",@progbits
17032 ENTRY(swapper_pg_dir)
17033+#ifdef CONFIG_X86_PAE
17034+ .fill 4,8,0
17035+#else
17036 .fill 1024,4,0
17037+#endif
17038+
17039+/*
17040+ * The IDT has to be page-aligned to simplify the Pentium
17041+ * F0 0F bug workaround.. We have a special link segment
17042+ * for this.
17043+ */
17044+.section .idt,"a",@progbits
17045+ENTRY(idt_table)
17046+ .fill 256,8,0
17047
17048 /*
17049 * This starts the data section.
17050 */
17051 #ifdef CONFIG_X86_PAE
17052-__PAGE_ALIGNED_DATA
17053- /* Page-aligned for the benefit of paravirt? */
17054- .align PAGE_SIZE
17055+.section .initial_page_table,"a",@progbits
17056 ENTRY(initial_page_table)
17057 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17058 # if KPMDS == 3
17059@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
17060 # error "Kernel PMDs should be 1, 2 or 3"
17061 # endif
17062 .align PAGE_SIZE /* needs to be page-sized too */
17063+
17064+#ifdef CONFIG_PAX_PER_CPU_PGD
17065+ENTRY(cpu_pgd)
17066+ .rept NR_CPUS
17067+ .fill 4,8,0
17068+ .endr
17069+#endif
17070+
17071 #endif
17072
17073 .data
17074 .balign 4
17075 ENTRY(stack_start)
17076- .long init_thread_union+THREAD_SIZE
17077+ .long init_thread_union+THREAD_SIZE-8
17078
17079+ready: .byte 0
17080+
17081+.section .rodata,"a",@progbits
17082 early_recursion_flag:
17083 .long 0
17084
17085-ready: .byte 0
17086-
17087 int_msg:
17088 .asciz "Unknown interrupt or fault at: %p %p %p\n"
17089
17090@@ -707,7 +811,7 @@ fault_msg:
17091 .word 0 # 32 bit align gdt_desc.address
17092 boot_gdt_descr:
17093 .word __BOOT_DS+7
17094- .long boot_gdt - __PAGE_OFFSET
17095+ .long pa(boot_gdt)
17096
17097 .word 0 # 32-bit align idt_desc.address
17098 idt_descr:
17099@@ -718,7 +822,7 @@ idt_descr:
17100 .word 0 # 32 bit align gdt_desc.address
17101 ENTRY(early_gdt_descr)
17102 .word GDT_ENTRIES*8-1
17103- .long gdt_page /* Overwritten for secondary CPUs */
17104+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
17105
17106 /*
17107 * The boot_gdt must mirror the equivalent in setup.S and is
17108@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
17109 .align L1_CACHE_BYTES
17110 ENTRY(boot_gdt)
17111 .fill GDT_ENTRY_BOOT_CS,8,0
17112- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17113- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17114+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17115+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17116+
17117+ .align PAGE_SIZE_asm
17118+ENTRY(cpu_gdt_table)
17119+ .rept NR_CPUS
17120+ .quad 0x0000000000000000 /* NULL descriptor */
17121+ .quad 0x0000000000000000 /* 0x0b reserved */
17122+ .quad 0x0000000000000000 /* 0x13 reserved */
17123+ .quad 0x0000000000000000 /* 0x1b reserved */
17124+
17125+#ifdef CONFIG_PAX_KERNEXEC
17126+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17127+#else
17128+ .quad 0x0000000000000000 /* 0x20 unused */
17129+#endif
17130+
17131+ .quad 0x0000000000000000 /* 0x28 unused */
17132+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17133+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17134+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17135+ .quad 0x0000000000000000 /* 0x4b reserved */
17136+ .quad 0x0000000000000000 /* 0x53 reserved */
17137+ .quad 0x0000000000000000 /* 0x5b reserved */
17138+
17139+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17140+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17141+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17142+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17143+
17144+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17145+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17146+
17147+ /*
17148+ * Segments used for calling PnP BIOS have byte granularity.
17149+ * The code segments and data segments have fixed 64k limits,
17150+ * the transfer segment sizes are set at run time.
17151+ */
17152+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
17153+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
17154+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
17155+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
17156+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
17157+
17158+ /*
17159+ * The APM segments have byte granularity and their bases
17160+ * are set at run time. All have 64k limits.
17161+ */
17162+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17163+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17164+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
17165+
17166+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17167+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17168+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17169+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17170+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17171+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17172+
17173+ /* Be sure this is zeroed to avoid false validations in Xen */
17174+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17175+ .endr
17176diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17177index 40f4eb3..6d24d9d 100644
17178--- a/arch/x86/kernel/head_64.S
17179+++ b/arch/x86/kernel/head_64.S
17180@@ -19,6 +19,8 @@
17181 #include <asm/cache.h>
17182 #include <asm/processor-flags.h>
17183 #include <asm/percpu.h>
17184+#include <asm/cpufeature.h>
17185+#include <asm/alternative-asm.h>
17186
17187 #ifdef CONFIG_PARAVIRT
17188 #include <asm/asm-offsets.h>
17189@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17190 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17191 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17192 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17193+L4_VMALLOC_START = pgd_index(VMALLOC_START)
17194+L3_VMALLOC_START = pud_index(VMALLOC_START)
17195+L4_VMALLOC_END = pgd_index(VMALLOC_END)
17196+L3_VMALLOC_END = pud_index(VMALLOC_END)
17197+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17198+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17199
17200 .text
17201 __HEAD
17202@@ -85,35 +93,23 @@ startup_64:
17203 */
17204 addq %rbp, init_level4_pgt + 0(%rip)
17205 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17206+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17207+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17208+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17209 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17210
17211 addq %rbp, level3_ident_pgt + 0(%rip)
17212+#ifndef CONFIG_XEN
17213+ addq %rbp, level3_ident_pgt + 8(%rip)
17214+#endif
17215
17216- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17217- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17218+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17219+
17220+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17221+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17222
17223 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17224-
17225- /* Add an Identity mapping if I am above 1G */
17226- leaq _text(%rip), %rdi
17227- andq $PMD_PAGE_MASK, %rdi
17228-
17229- movq %rdi, %rax
17230- shrq $PUD_SHIFT, %rax
17231- andq $(PTRS_PER_PUD - 1), %rax
17232- jz ident_complete
17233-
17234- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17235- leaq level3_ident_pgt(%rip), %rbx
17236- movq %rdx, 0(%rbx, %rax, 8)
17237-
17238- movq %rdi, %rax
17239- shrq $PMD_SHIFT, %rax
17240- andq $(PTRS_PER_PMD - 1), %rax
17241- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17242- leaq level2_spare_pgt(%rip), %rbx
17243- movq %rdx, 0(%rbx, %rax, 8)
17244-ident_complete:
17245+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17246
17247 /*
17248 * Fixup the kernel text+data virtual addresses. Note that
17249@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
17250 * after the boot processor executes this code.
17251 */
17252
17253- /* Enable PAE mode and PGE */
17254- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17255+ /* Enable PAE mode and PSE/PGE */
17256+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17257 movq %rax, %cr4
17258
17259 /* Setup early boot stage 4 level pagetables. */
17260@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
17261 movl $MSR_EFER, %ecx
17262 rdmsr
17263 btsl $_EFER_SCE, %eax /* Enable System Call */
17264- btl $20,%edi /* No Execute supported? */
17265+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17266 jnc 1f
17267 btsl $_EFER_NX, %eax
17268+ leaq init_level4_pgt(%rip), %rdi
17269+#ifndef CONFIG_EFI
17270+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17271+#endif
17272+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17273+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17274+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17275+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
17276 1: wrmsr /* Make changes effective */
17277
17278 /* Setup cr0 */
17279@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
17280 * jump. In addition we need to ensure %cs is set so we make this
17281 * a far return.
17282 */
17283+ pax_set_fptr_mask
17284 movq initial_code(%rip),%rax
17285 pushq $0 # fake return address to stop unwinder
17286 pushq $__KERNEL_CS # set correct cs
17287@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
17288 bad_address:
17289 jmp bad_address
17290
17291- .section ".init.text","ax"
17292+ __INIT
17293 #ifdef CONFIG_EARLY_PRINTK
17294 .globl early_idt_handlers
17295 early_idt_handlers:
17296@@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
17297 #endif /* EARLY_PRINTK */
17298 1: hlt
17299 jmp 1b
17300+ .previous
17301
17302 #ifdef CONFIG_EARLY_PRINTK
17303+ __INITDATA
17304 early_recursion_flag:
17305 .long 0
17306+ .previous
17307
17308+ .section .rodata,"a",@progbits
17309 early_idt_msg:
17310 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17311 early_idt_ripmsg:
17312 .asciz "RIP %s\n"
17313+ .previous
17314 #endif /* CONFIG_EARLY_PRINTK */
17315- .previous
17316
17317+ .section .rodata,"a",@progbits
17318 #define NEXT_PAGE(name) \
17319 .balign PAGE_SIZE; \
17320 ENTRY(name)
17321@@ -338,7 +348,6 @@ ENTRY(name)
17322 i = i + 1 ; \
17323 .endr
17324
17325- .data
17326 /*
17327 * This default setting generates an ident mapping at address 0x100000
17328 * and a mapping for the kernel that precisely maps virtual address
17329@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
17330 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17331 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17332 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17333+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
17334+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17335+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
17336+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17337+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17338+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17339 .org init_level4_pgt + L4_START_KERNEL*8, 0
17340 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17341 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17342
17343+#ifdef CONFIG_PAX_PER_CPU_PGD
17344+NEXT_PAGE(cpu_pgd)
17345+ .rept NR_CPUS
17346+ .fill 512,8,0
17347+ .endr
17348+#endif
17349+
17350 NEXT_PAGE(level3_ident_pgt)
17351 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17352+#ifdef CONFIG_XEN
17353 .fill 511,8,0
17354+#else
17355+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17356+ .fill 510,8,0
17357+#endif
17358+
17359+NEXT_PAGE(level3_vmalloc_start_pgt)
17360+ .fill 512,8,0
17361+
17362+NEXT_PAGE(level3_vmalloc_end_pgt)
17363+ .fill 512,8,0
17364+
17365+NEXT_PAGE(level3_vmemmap_pgt)
17366+ .fill L3_VMEMMAP_START,8,0
17367+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17368
17369 NEXT_PAGE(level3_kernel_pgt)
17370 .fill L3_START_KERNEL,8,0
17371@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
17372 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17373 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17374
17375+NEXT_PAGE(level2_vmemmap_pgt)
17376+ .fill 512,8,0
17377+
17378 NEXT_PAGE(level2_fixmap_pgt)
17379- .fill 506,8,0
17380- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17381- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17382- .fill 5,8,0
17383+ .fill 507,8,0
17384+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17385+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17386+ .fill 4,8,0
17387
17388-NEXT_PAGE(level1_fixmap_pgt)
17389+NEXT_PAGE(level1_vsyscall_pgt)
17390 .fill 512,8,0
17391
17392-NEXT_PAGE(level2_ident_pgt)
17393- /* Since I easily can, map the first 1G.
17394+ /* Since I easily can, map the first 2G.
17395 * Don't set NX because code runs from these pages.
17396 */
17397- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17398+NEXT_PAGE(level2_ident_pgt)
17399+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17400
17401 NEXT_PAGE(level2_kernel_pgt)
17402 /*
17403@@ -389,37 +429,59 @@ NEXT_PAGE(level2_kernel_pgt)
17404 * If you want to increase this then increase MODULES_VADDR
17405 * too.)
17406 */
17407- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17408- KERNEL_IMAGE_SIZE/PMD_SIZE)
17409-
17410-NEXT_PAGE(level2_spare_pgt)
17411- .fill 512, 8, 0
17412+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17413
17414 #undef PMDS
17415 #undef NEXT_PAGE
17416
17417- .data
17418+ .align PAGE_SIZE
17419+ENTRY(cpu_gdt_table)
17420+ .rept NR_CPUS
17421+ .quad 0x0000000000000000 /* NULL descriptor */
17422+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17423+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
17424+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
17425+ .quad 0x00cffb000000ffff /* __USER32_CS */
17426+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17427+ .quad 0x00affb000000ffff /* __USER_CS */
17428+
17429+#ifdef CONFIG_PAX_KERNEXEC
17430+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17431+#else
17432+ .quad 0x0 /* unused */
17433+#endif
17434+
17435+ .quad 0,0 /* TSS */
17436+ .quad 0,0 /* LDT */
17437+ .quad 0,0,0 /* three TLS descriptors */
17438+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
17439+ /* asm/segment.h:GDT_ENTRIES must match this */
17440+
17441+ /* zero the remaining page */
17442+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17443+ .endr
17444+
17445 .align 16
17446 .globl early_gdt_descr
17447 early_gdt_descr:
17448 .word GDT_ENTRIES*8-1
17449 early_gdt_descr_base:
17450- .quad INIT_PER_CPU_VAR(gdt_page)
17451+ .quad cpu_gdt_table
17452
17453 ENTRY(phys_base)
17454 /* This must match the first entry in level2_kernel_pgt */
17455 .quad 0x0000000000000000
17456
17457 #include "../../x86/xen/xen-head.S"
17458-
17459- .section .bss, "aw", @nobits
17460+
17461+ .section .rodata,"a",@progbits
17462 .align L1_CACHE_BYTES
17463 ENTRY(idt_table)
17464- .skip IDT_ENTRIES * 16
17465+ .fill 512,8,0
17466
17467 .align L1_CACHE_BYTES
17468 ENTRY(nmi_idt_table)
17469- .skip IDT_ENTRIES * 16
17470+ .fill 512,8,0
17471
17472 __PAGE_ALIGNED_BSS
17473 .align PAGE_SIZE
17474diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17475index 9c3bd4a..e1d9b35 100644
17476--- a/arch/x86/kernel/i386_ksyms_32.c
17477+++ b/arch/x86/kernel/i386_ksyms_32.c
17478@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17479 EXPORT_SYMBOL(cmpxchg8b_emu);
17480 #endif
17481
17482+EXPORT_SYMBOL_GPL(cpu_gdt_table);
17483+
17484 /* Networking helper routines. */
17485 EXPORT_SYMBOL(csum_partial_copy_generic);
17486+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17487+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17488
17489 EXPORT_SYMBOL(__get_user_1);
17490 EXPORT_SYMBOL(__get_user_2);
17491@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17492
17493 EXPORT_SYMBOL(csum_partial);
17494 EXPORT_SYMBOL(empty_zero_page);
17495+
17496+#ifdef CONFIG_PAX_KERNEXEC
17497+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17498+#endif
17499diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
17500index 2d6e649..df6e1af 100644
17501--- a/arch/x86/kernel/i387.c
17502+++ b/arch/x86/kernel/i387.c
17503@@ -59,7 +59,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
17504 static inline bool interrupted_user_mode(void)
17505 {
17506 struct pt_regs *regs = get_irq_regs();
17507- return regs && user_mode_vm(regs);
17508+ return regs && user_mode(regs);
17509 }
17510
17511 /*
17512diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17513index 36d1853..bf25736 100644
17514--- a/arch/x86/kernel/i8259.c
17515+++ b/arch/x86/kernel/i8259.c
17516@@ -209,7 +209,7 @@ spurious_8259A_irq:
17517 "spurious 8259A interrupt: IRQ%d.\n", irq);
17518 spurious_irq_mask |= irqmask;
17519 }
17520- atomic_inc(&irq_err_count);
17521+ atomic_inc_unchecked(&irq_err_count);
17522 /*
17523 * Theoretically we do not have to handle this IRQ,
17524 * but in Linux this does not cause problems and is
17525diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17526index 43e9ccf..44ccf6f 100644
17527--- a/arch/x86/kernel/init_task.c
17528+++ b/arch/x86/kernel/init_task.c
17529@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17530 * way process stacks are handled. This is done by having a special
17531 * "init_task" linker map entry..
17532 */
17533-union thread_union init_thread_union __init_task_data =
17534- { INIT_THREAD_INFO(init_task) };
17535+union thread_union init_thread_union __init_task_data;
17536
17537 /*
17538 * Initial task structure.
17539@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17540 * section. Since TSS's are completely CPU-local, we want them
17541 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17542 */
17543-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17544-
17545+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17546+EXPORT_SYMBOL(init_tss);
17547diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17548index 8c96897..be66bfa 100644
17549--- a/arch/x86/kernel/ioport.c
17550+++ b/arch/x86/kernel/ioport.c
17551@@ -6,6 +6,7 @@
17552 #include <linux/sched.h>
17553 #include <linux/kernel.h>
17554 #include <linux/capability.h>
17555+#include <linux/security.h>
17556 #include <linux/errno.h>
17557 #include <linux/types.h>
17558 #include <linux/ioport.h>
17559@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17560
17561 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17562 return -EINVAL;
17563+#ifdef CONFIG_GRKERNSEC_IO
17564+ if (turn_on && grsec_disable_privio) {
17565+ gr_handle_ioperm();
17566+ return -EPERM;
17567+ }
17568+#endif
17569 if (turn_on && !capable(CAP_SYS_RAWIO))
17570 return -EPERM;
17571
17572@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17573 * because the ->io_bitmap_max value must match the bitmap
17574 * contents:
17575 */
17576- tss = &per_cpu(init_tss, get_cpu());
17577+ tss = init_tss + get_cpu();
17578
17579 if (turn_on)
17580 bitmap_clear(t->io_bitmap_ptr, from, num);
17581@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
17582 return -EINVAL;
17583 /* Trying to gain more privileges? */
17584 if (level > old) {
17585+#ifdef CONFIG_GRKERNSEC_IO
17586+ if (grsec_disable_privio) {
17587+ gr_handle_iopl();
17588+ return -EPERM;
17589+ }
17590+#endif
17591 if (!capable(CAP_SYS_RAWIO))
17592 return -EPERM;
17593 }
17594diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17595index 3dafc60..aa8e9c4 100644
17596--- a/arch/x86/kernel/irq.c
17597+++ b/arch/x86/kernel/irq.c
17598@@ -18,7 +18,7 @@
17599 #include <asm/mce.h>
17600 #include <asm/hw_irq.h>
17601
17602-atomic_t irq_err_count;
17603+atomic_unchecked_t irq_err_count;
17604
17605 /* Function pointer for generic interrupt vector handling */
17606 void (*x86_platform_ipi_callback)(void) = NULL;
17607@@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
17608 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17609 seq_printf(p, " Machine check polls\n");
17610 #endif
17611- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17612+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17613 #if defined(CONFIG_X86_IO_APIC)
17614- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17615+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17616 #endif
17617 return 0;
17618 }
17619@@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17620
17621 u64 arch_irq_stat(void)
17622 {
17623- u64 sum = atomic_read(&irq_err_count);
17624+ u64 sum = atomic_read_unchecked(&irq_err_count);
17625
17626 #ifdef CONFIG_X86_IO_APIC
17627- sum += atomic_read(&irq_mis_count);
17628+ sum += atomic_read_unchecked(&irq_mis_count);
17629 #endif
17630 return sum;
17631 }
17632diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17633index 58b7f27..e112d08 100644
17634--- a/arch/x86/kernel/irq_32.c
17635+++ b/arch/x86/kernel/irq_32.c
17636@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
17637 __asm__ __volatile__("andl %%esp,%0" :
17638 "=r" (sp) : "0" (THREAD_SIZE - 1));
17639
17640- return sp < (sizeof(struct thread_info) + STACK_WARN);
17641+ return sp < STACK_WARN;
17642 }
17643
17644 static void print_stack_overflow(void)
17645@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
17646 * per-CPU IRQ handling contexts (thread information and stack)
17647 */
17648 union irq_ctx {
17649- struct thread_info tinfo;
17650- u32 stack[THREAD_SIZE/sizeof(u32)];
17651+ unsigned long previous_esp;
17652+ u32 stack[THREAD_SIZE/sizeof(u32)];
17653 } __attribute__((aligned(THREAD_SIZE)));
17654
17655 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17656@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
17657 static inline int
17658 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17659 {
17660- union irq_ctx *curctx, *irqctx;
17661+ union irq_ctx *irqctx;
17662 u32 *isp, arg1, arg2;
17663
17664- curctx = (union irq_ctx *) current_thread_info();
17665 irqctx = __this_cpu_read(hardirq_ctx);
17666
17667 /*
17668@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17669 * handler) we can't do that and just have to keep using the
17670 * current stack (which is the irq stack already after all)
17671 */
17672- if (unlikely(curctx == irqctx))
17673+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17674 return 0;
17675
17676 /* build the stack frame on the IRQ stack */
17677- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17678- irqctx->tinfo.task = curctx->tinfo.task;
17679- irqctx->tinfo.previous_esp = current_stack_pointer;
17680+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17681+ irqctx->previous_esp = current_stack_pointer;
17682
17683- /* Copy the preempt_count so that the [soft]irq checks work. */
17684- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
17685+#ifdef CONFIG_PAX_MEMORY_UDEREF
17686+ __set_fs(MAKE_MM_SEG(0));
17687+#endif
17688
17689 if (unlikely(overflow))
17690 call_on_stack(print_stack_overflow, isp);
17691@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17692 : "0" (irq), "1" (desc), "2" (isp),
17693 "D" (desc->handle_irq)
17694 : "memory", "cc", "ecx");
17695+
17696+#ifdef CONFIG_PAX_MEMORY_UDEREF
17697+ __set_fs(current_thread_info()->addr_limit);
17698+#endif
17699+
17700 return 1;
17701 }
17702
17703@@ -121,29 +125,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17704 */
17705 void __cpuinit irq_ctx_init(int cpu)
17706 {
17707- union irq_ctx *irqctx;
17708-
17709 if (per_cpu(hardirq_ctx, cpu))
17710 return;
17711
17712- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17713- THREAD_FLAGS,
17714- THREAD_ORDER));
17715- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17716- irqctx->tinfo.cpu = cpu;
17717- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17718- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17719-
17720- per_cpu(hardirq_ctx, cpu) = irqctx;
17721-
17722- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17723- THREAD_FLAGS,
17724- THREAD_ORDER));
17725- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17726- irqctx->tinfo.cpu = cpu;
17727- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17728-
17729- per_cpu(softirq_ctx, cpu) = irqctx;
17730+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17731+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17732
17733 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17734 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17735@@ -152,7 +138,6 @@ void __cpuinit irq_ctx_init(int cpu)
17736 asmlinkage void do_softirq(void)
17737 {
17738 unsigned long flags;
17739- struct thread_info *curctx;
17740 union irq_ctx *irqctx;
17741 u32 *isp;
17742
17743@@ -162,15 +147,22 @@ asmlinkage void do_softirq(void)
17744 local_irq_save(flags);
17745
17746 if (local_softirq_pending()) {
17747- curctx = current_thread_info();
17748 irqctx = __this_cpu_read(softirq_ctx);
17749- irqctx->tinfo.task = curctx->task;
17750- irqctx->tinfo.previous_esp = current_stack_pointer;
17751+ irqctx->previous_esp = current_stack_pointer;
17752
17753 /* build the stack frame on the softirq stack */
17754- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17755+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17756+
17757+#ifdef CONFIG_PAX_MEMORY_UDEREF
17758+ __set_fs(MAKE_MM_SEG(0));
17759+#endif
17760
17761 call_on_stack(__do_softirq, isp);
17762+
17763+#ifdef CONFIG_PAX_MEMORY_UDEREF
17764+ __set_fs(current_thread_info()->addr_limit);
17765+#endif
17766+
17767 /*
17768 * Shouldn't happen, we returned above if in_interrupt():
17769 */
17770@@ -191,7 +183,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
17771 if (unlikely(!desc))
17772 return false;
17773
17774- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17775+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17776 if (unlikely(overflow))
17777 print_stack_overflow();
17778 desc->handle_irq(irq, desc);
17779diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
17780index d04d3ec..ea4b374 100644
17781--- a/arch/x86/kernel/irq_64.c
17782+++ b/arch/x86/kernel/irq_64.c
17783@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17784 u64 estack_top, estack_bottom;
17785 u64 curbase = (u64)task_stack_page(current);
17786
17787- if (user_mode_vm(regs))
17788+ if (user_mode(regs))
17789 return;
17790
17791 if (regs->sp >= curbase + sizeof(struct thread_info) +
17792diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
17793index 1d5d31e..ab846ed 100644
17794--- a/arch/x86/kernel/kdebugfs.c
17795+++ b/arch/x86/kernel/kdebugfs.c
17796@@ -28,6 +28,8 @@ struct setup_data_node {
17797 };
17798
17799 static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17800+ size_t count, loff_t *ppos) __size_overflow(3);
17801+static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17802 size_t count, loff_t *ppos)
17803 {
17804 struct setup_data_node *node = file->private_data;
17805diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17806index 8bfb614..2b3b35f 100644
17807--- a/arch/x86/kernel/kgdb.c
17808+++ b/arch/x86/kernel/kgdb.c
17809@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
17810 #ifdef CONFIG_X86_32
17811 switch (regno) {
17812 case GDB_SS:
17813- if (!user_mode_vm(regs))
17814+ if (!user_mode(regs))
17815 *(unsigned long *)mem = __KERNEL_DS;
17816 break;
17817 case GDB_SP:
17818- if (!user_mode_vm(regs))
17819+ if (!user_mode(regs))
17820 *(unsigned long *)mem = kernel_stack_pointer(regs);
17821 break;
17822 case GDB_GS:
17823@@ -476,12 +476,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17824 case 'k':
17825 /* clear the trace bit */
17826 linux_regs->flags &= ~X86_EFLAGS_TF;
17827- atomic_set(&kgdb_cpu_doing_single_step, -1);
17828+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17829
17830 /* set the trace bit if we're stepping */
17831 if (remcomInBuffer[0] == 's') {
17832 linux_regs->flags |= X86_EFLAGS_TF;
17833- atomic_set(&kgdb_cpu_doing_single_step,
17834+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17835 raw_smp_processor_id());
17836 }
17837
17838@@ -546,7 +546,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17839
17840 switch (cmd) {
17841 case DIE_DEBUG:
17842- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
17843+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
17844 if (user_mode(regs))
17845 return single_step_cont(regs, args);
17846 break;
17847diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
17848index c5e410e..da6aaf9 100644
17849--- a/arch/x86/kernel/kprobes-opt.c
17850+++ b/arch/x86/kernel/kprobes-opt.c
17851@@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17852 * Verify if the address gap is in 2GB range, because this uses
17853 * a relative jump.
17854 */
17855- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
17856+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
17857 if (abs(rel) > 0x7fffffff)
17858 return -ERANGE;
17859
17860@@ -359,11 +359,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17861 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
17862
17863 /* Set probe function call */
17864- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
17865+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
17866
17867 /* Set returning jmp instruction at the tail of out-of-line buffer */
17868 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
17869- (u8 *)op->kp.addr + op->optinsn.size);
17870+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
17871
17872 flush_icache_range((unsigned long) buf,
17873 (unsigned long) buf + TMPL_END_IDX +
17874@@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
17875 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
17876
17877 /* Backup instructions which will be replaced by jump address */
17878- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
17879+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
17880 RELATIVE_ADDR_SIZE);
17881
17882 insn_buf[0] = RELATIVEJUMP_OPCODE;
17883diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17884index e213fc8..d783ba4 100644
17885--- a/arch/x86/kernel/kprobes.c
17886+++ b/arch/x86/kernel/kprobes.c
17887@@ -120,8 +120,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
17888 } __attribute__((packed)) *insn;
17889
17890 insn = (struct __arch_relative_insn *)from;
17891+
17892+ pax_open_kernel();
17893 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
17894 insn->op = op;
17895+ pax_close_kernel();
17896 }
17897
17898 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
17899@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
17900 kprobe_opcode_t opcode;
17901 kprobe_opcode_t *orig_opcodes = opcodes;
17902
17903- if (search_exception_tables((unsigned long)opcodes))
17904+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17905 return 0; /* Page fault may occur on this address. */
17906
17907 retry:
17908@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17909 /* Another subsystem puts a breakpoint, failed to recover */
17910 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
17911 return 0;
17912+ pax_open_kernel();
17913 memcpy(dest, insn.kaddr, insn.length);
17914+ pax_close_kernel();
17915
17916 #ifdef CONFIG_X86_64
17917 if (insn_rip_relative(&insn)) {
17918@@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17919 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
17920 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
17921 disp = (u8 *) dest + insn_offset_displacement(&insn);
17922+ pax_open_kernel();
17923 *(s32 *) disp = (s32) newdisp;
17924+ pax_close_kernel();
17925 }
17926 #endif
17927 return insn.length;
17928@@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
17929 * nor set current_kprobe, because it doesn't use single
17930 * stepping.
17931 */
17932- regs->ip = (unsigned long)p->ainsn.insn;
17933+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17934 preempt_enable_no_resched();
17935 return;
17936 }
17937@@ -504,7 +511,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
17938 if (p->opcode == BREAKPOINT_INSTRUCTION)
17939 regs->ip = (unsigned long)p->addr;
17940 else
17941- regs->ip = (unsigned long)p->ainsn.insn;
17942+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17943 }
17944
17945 /*
17946@@ -583,7 +590,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17947 setup_singlestep(p, regs, kcb, 0);
17948 return 1;
17949 }
17950- } else if (*addr != BREAKPOINT_INSTRUCTION) {
17951+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17952 /*
17953 * The breakpoint instruction was removed right
17954 * after we hit it. Another cpu has removed
17955@@ -628,6 +635,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17956 " movq %rax, 152(%rsp)\n"
17957 RESTORE_REGS_STRING
17958 " popfq\n"
17959+#ifdef KERNEXEC_PLUGIN
17960+ " btsq $63,(%rsp)\n"
17961+#endif
17962 #else
17963 " pushf\n"
17964 SAVE_REGS_STRING
17965@@ -765,7 +775,7 @@ static void __kprobes
17966 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
17967 {
17968 unsigned long *tos = stack_addr(regs);
17969- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
17970+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
17971 unsigned long orig_ip = (unsigned long)p->addr;
17972 kprobe_opcode_t *insn = p->ainsn.insn;
17973
17974@@ -947,7 +957,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
17975 struct die_args *args = data;
17976 int ret = NOTIFY_DONE;
17977
17978- if (args->regs && user_mode_vm(args->regs))
17979+ if (args->regs && user_mode(args->regs))
17980 return ret;
17981
17982 switch (val) {
17983diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
17984index ebc9873..1b9724b 100644
17985--- a/arch/x86/kernel/ldt.c
17986+++ b/arch/x86/kernel/ldt.c
17987@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
17988 if (reload) {
17989 #ifdef CONFIG_SMP
17990 preempt_disable();
17991- load_LDT(pc);
17992+ load_LDT_nolock(pc);
17993 if (!cpumask_equal(mm_cpumask(current->mm),
17994 cpumask_of(smp_processor_id())))
17995 smp_call_function(flush_ldt, current->mm, 1);
17996 preempt_enable();
17997 #else
17998- load_LDT(pc);
17999+ load_LDT_nolock(pc);
18000 #endif
18001 }
18002 if (oldsize) {
18003@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18004 return err;
18005
18006 for (i = 0; i < old->size; i++)
18007- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18008+ write_ldt_entry(new->ldt, i, old->ldt + i);
18009 return 0;
18010 }
18011
18012@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18013 retval = copy_ldt(&mm->context, &old_mm->context);
18014 mutex_unlock(&old_mm->context.lock);
18015 }
18016+
18017+ if (tsk == current) {
18018+ mm->context.vdso = 0;
18019+
18020+#ifdef CONFIG_X86_32
18021+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18022+ mm->context.user_cs_base = 0UL;
18023+ mm->context.user_cs_limit = ~0UL;
18024+
18025+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18026+ cpus_clear(mm->context.cpu_user_cs_mask);
18027+#endif
18028+
18029+#endif
18030+#endif
18031+
18032+ }
18033+
18034 return retval;
18035 }
18036
18037@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18038 }
18039 }
18040
18041+#ifdef CONFIG_PAX_SEGMEXEC
18042+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18043+ error = -EINVAL;
18044+ goto out_unlock;
18045+ }
18046+#endif
18047+
18048 fill_ldt(&ldt, &ldt_info);
18049 if (oldmode)
18050 ldt.avl = 0;
18051diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18052index 5b19e4d..6476a76 100644
18053--- a/arch/x86/kernel/machine_kexec_32.c
18054+++ b/arch/x86/kernel/machine_kexec_32.c
18055@@ -26,7 +26,7 @@
18056 #include <asm/cacheflush.h>
18057 #include <asm/debugreg.h>
18058
18059-static void set_idt(void *newidt, __u16 limit)
18060+static void set_idt(struct desc_struct *newidt, __u16 limit)
18061 {
18062 struct desc_ptr curidt;
18063
18064@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18065 }
18066
18067
18068-static void set_gdt(void *newgdt, __u16 limit)
18069+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18070 {
18071 struct desc_ptr curgdt;
18072
18073@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
18074 }
18075
18076 control_page = page_address(image->control_code_page);
18077- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18078+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18079
18080 relocate_kernel_ptr = control_page;
18081 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18082diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18083index 0327e2b..e43737b 100644
18084--- a/arch/x86/kernel/microcode_intel.c
18085+++ b/arch/x86/kernel/microcode_intel.c
18086@@ -430,13 +430,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18087
18088 static int get_ucode_user(void *to, const void *from, size_t n)
18089 {
18090- return copy_from_user(to, from, n);
18091+ return copy_from_user(to, (const void __force_user *)from, n);
18092 }
18093
18094 static enum ucode_state
18095 request_microcode_user(int cpu, const void __user *buf, size_t size)
18096 {
18097- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18098+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18099 }
18100
18101 static void microcode_fini_cpu(int cpu)
18102diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18103index f21fd94..61565cd 100644
18104--- a/arch/x86/kernel/module.c
18105+++ b/arch/x86/kernel/module.c
18106@@ -35,15 +35,60 @@
18107 #define DEBUGP(fmt...)
18108 #endif
18109
18110-void *module_alloc(unsigned long size)
18111+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
18112 {
18113- if (PAGE_ALIGN(size) > MODULES_LEN)
18114+ if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
18115 return NULL;
18116 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
18117- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
18118+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
18119 -1, __builtin_return_address(0));
18120 }
18121
18122+void *module_alloc(unsigned long size)
18123+{
18124+
18125+#ifdef CONFIG_PAX_KERNEXEC
18126+ return __module_alloc(size, PAGE_KERNEL);
18127+#else
18128+ return __module_alloc(size, PAGE_KERNEL_EXEC);
18129+#endif
18130+
18131+}
18132+
18133+#ifdef CONFIG_PAX_KERNEXEC
18134+#ifdef CONFIG_X86_32
18135+void *module_alloc_exec(unsigned long size)
18136+{
18137+ struct vm_struct *area;
18138+
18139+ if (size == 0)
18140+ return NULL;
18141+
18142+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18143+ return area ? area->addr : NULL;
18144+}
18145+EXPORT_SYMBOL(module_alloc_exec);
18146+
18147+void module_free_exec(struct module *mod, void *module_region)
18148+{
18149+ vunmap(module_region);
18150+}
18151+EXPORT_SYMBOL(module_free_exec);
18152+#else
18153+void module_free_exec(struct module *mod, void *module_region)
18154+{
18155+ module_free(mod, module_region);
18156+}
18157+EXPORT_SYMBOL(module_free_exec);
18158+
18159+void *module_alloc_exec(unsigned long size)
18160+{
18161+ return __module_alloc(size, PAGE_KERNEL_RX);
18162+}
18163+EXPORT_SYMBOL(module_alloc_exec);
18164+#endif
18165+#endif
18166+
18167 #ifdef CONFIG_X86_32
18168 int apply_relocate(Elf32_Shdr *sechdrs,
18169 const char *strtab,
18170@@ -54,14 +99,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18171 unsigned int i;
18172 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18173 Elf32_Sym *sym;
18174- uint32_t *location;
18175+ uint32_t *plocation, location;
18176
18177 DEBUGP("Applying relocate section %u to %u\n", relsec,
18178 sechdrs[relsec].sh_info);
18179 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18180 /* This is where to make the change */
18181- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18182- + rel[i].r_offset;
18183+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18184+ location = (uint32_t)plocation;
18185+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18186+ plocation = ktla_ktva((void *)plocation);
18187 /* This is the symbol it is referring to. Note that all
18188 undefined symbols have been resolved. */
18189 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18190@@ -70,11 +117,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18191 switch (ELF32_R_TYPE(rel[i].r_info)) {
18192 case R_386_32:
18193 /* We add the value into the location given */
18194- *location += sym->st_value;
18195+ pax_open_kernel();
18196+ *plocation += sym->st_value;
18197+ pax_close_kernel();
18198 break;
18199 case R_386_PC32:
18200 /* Add the value, subtract its postition */
18201- *location += sym->st_value - (uint32_t)location;
18202+ pax_open_kernel();
18203+ *plocation += sym->st_value - location;
18204+ pax_close_kernel();
18205 break;
18206 default:
18207 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18208@@ -119,21 +170,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18209 case R_X86_64_NONE:
18210 break;
18211 case R_X86_64_64:
18212+ pax_open_kernel();
18213 *(u64 *)loc = val;
18214+ pax_close_kernel();
18215 break;
18216 case R_X86_64_32:
18217+ pax_open_kernel();
18218 *(u32 *)loc = val;
18219+ pax_close_kernel();
18220 if (val != *(u32 *)loc)
18221 goto overflow;
18222 break;
18223 case R_X86_64_32S:
18224+ pax_open_kernel();
18225 *(s32 *)loc = val;
18226+ pax_close_kernel();
18227 if ((s64)val != *(s32 *)loc)
18228 goto overflow;
18229 break;
18230 case R_X86_64_PC32:
18231 val -= (u64)loc;
18232+ pax_open_kernel();
18233 *(u32 *)loc = val;
18234+ pax_close_kernel();
18235+
18236 #if 0
18237 if ((s64)val != *(s32 *)loc)
18238 goto overflow;
18239diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
18240index 32856fa..ce95eaa 100644
18241--- a/arch/x86/kernel/nmi.c
18242+++ b/arch/x86/kernel/nmi.c
18243@@ -507,6 +507,17 @@ static inline void nmi_nesting_postprocess(void)
18244 dotraplinkage notrace __kprobes void
18245 do_nmi(struct pt_regs *regs, long error_code)
18246 {
18247+
18248+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18249+ if (!user_mode(regs)) {
18250+ unsigned long cs = regs->cs & 0xFFFF;
18251+ unsigned long ip = ktva_ktla(regs->ip);
18252+
18253+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
18254+ regs->ip = ip;
18255+ }
18256+#endif
18257+
18258 nmi_nesting_preprocess(regs);
18259
18260 nmi_enter();
18261diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18262index 676b8c7..870ba04 100644
18263--- a/arch/x86/kernel/paravirt-spinlocks.c
18264+++ b/arch/x86/kernel/paravirt-spinlocks.c
18265@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
18266 arch_spin_lock(lock);
18267 }
18268
18269-struct pv_lock_ops pv_lock_ops = {
18270+struct pv_lock_ops pv_lock_ops __read_only = {
18271 #ifdef CONFIG_SMP
18272 .spin_is_locked = __ticket_spin_is_locked,
18273 .spin_is_contended = __ticket_spin_is_contended,
18274diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18275index ab13760..01218e0 100644
18276--- a/arch/x86/kernel/paravirt.c
18277+++ b/arch/x86/kernel/paravirt.c
18278@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
18279 {
18280 return x;
18281 }
18282+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18283+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18284+#endif
18285
18286 void __init default_banner(void)
18287 {
18288@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18289 if (opfunc == NULL)
18290 /* If there's no function, patch it with a ud2a (BUG) */
18291 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18292- else if (opfunc == _paravirt_nop)
18293+ else if (opfunc == (void *)_paravirt_nop)
18294 /* If the operation is a nop, then nop the callsite */
18295 ret = paravirt_patch_nop();
18296
18297 /* identity functions just return their single argument */
18298- else if (opfunc == _paravirt_ident_32)
18299+ else if (opfunc == (void *)_paravirt_ident_32)
18300 ret = paravirt_patch_ident_32(insnbuf, len);
18301- else if (opfunc == _paravirt_ident_64)
18302+ else if (opfunc == (void *)_paravirt_ident_64)
18303 ret = paravirt_patch_ident_64(insnbuf, len);
18304+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18305+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18306+ ret = paravirt_patch_ident_64(insnbuf, len);
18307+#endif
18308
18309 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18310 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18311@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18312 if (insn_len > len || start == NULL)
18313 insn_len = len;
18314 else
18315- memcpy(insnbuf, start, insn_len);
18316+ memcpy(insnbuf, ktla_ktva(start), insn_len);
18317
18318 return insn_len;
18319 }
18320@@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
18321 preempt_enable();
18322 }
18323
18324-struct pv_info pv_info = {
18325+struct pv_info pv_info __read_only = {
18326 .name = "bare hardware",
18327 .paravirt_enabled = 0,
18328 .kernel_rpl = 0,
18329@@ -315,16 +322,16 @@ struct pv_info pv_info = {
18330 #endif
18331 };
18332
18333-struct pv_init_ops pv_init_ops = {
18334+struct pv_init_ops pv_init_ops __read_only = {
18335 .patch = native_patch,
18336 };
18337
18338-struct pv_time_ops pv_time_ops = {
18339+struct pv_time_ops pv_time_ops __read_only = {
18340 .sched_clock = native_sched_clock,
18341 .steal_clock = native_steal_clock,
18342 };
18343
18344-struct pv_irq_ops pv_irq_ops = {
18345+struct pv_irq_ops pv_irq_ops __read_only = {
18346 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18347 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18348 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18349@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
18350 #endif
18351 };
18352
18353-struct pv_cpu_ops pv_cpu_ops = {
18354+struct pv_cpu_ops pv_cpu_ops __read_only = {
18355 .cpuid = native_cpuid,
18356 .get_debugreg = native_get_debugreg,
18357 .set_debugreg = native_set_debugreg,
18358@@ -397,21 +404,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18359 .end_context_switch = paravirt_nop,
18360 };
18361
18362-struct pv_apic_ops pv_apic_ops = {
18363+struct pv_apic_ops pv_apic_ops __read_only = {
18364 #ifdef CONFIG_X86_LOCAL_APIC
18365 .startup_ipi_hook = paravirt_nop,
18366 #endif
18367 };
18368
18369-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18370+#ifdef CONFIG_X86_32
18371+#ifdef CONFIG_X86_PAE
18372+/* 64-bit pagetable entries */
18373+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18374+#else
18375 /* 32-bit pagetable entries */
18376 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18377+#endif
18378 #else
18379 /* 64-bit pagetable entries */
18380 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18381 #endif
18382
18383-struct pv_mmu_ops pv_mmu_ops = {
18384+struct pv_mmu_ops pv_mmu_ops __read_only = {
18385
18386 .read_cr2 = native_read_cr2,
18387 .write_cr2 = native_write_cr2,
18388@@ -461,6 +473,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18389 .make_pud = PTE_IDENT,
18390
18391 .set_pgd = native_set_pgd,
18392+ .set_pgd_batched = native_set_pgd_batched,
18393 #endif
18394 #endif /* PAGETABLE_LEVELS >= 3 */
18395
18396@@ -480,6 +493,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18397 },
18398
18399 .set_fixmap = native_set_fixmap,
18400+
18401+#ifdef CONFIG_PAX_KERNEXEC
18402+ .pax_open_kernel = native_pax_open_kernel,
18403+ .pax_close_kernel = native_pax_close_kernel,
18404+#endif
18405+
18406 };
18407
18408 EXPORT_SYMBOL_GPL(pv_time_ops);
18409diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
18410index 35ccf75..7a15747 100644
18411--- a/arch/x86/kernel/pci-iommu_table.c
18412+++ b/arch/x86/kernel/pci-iommu_table.c
18413@@ -2,7 +2,7 @@
18414 #include <asm/iommu_table.h>
18415 #include <linux/string.h>
18416 #include <linux/kallsyms.h>
18417-
18418+#include <linux/sched.h>
18419
18420 #define DEBUG 1
18421
18422diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18423index 1d92a5a..7bc8c29 100644
18424--- a/arch/x86/kernel/process.c
18425+++ b/arch/x86/kernel/process.c
18426@@ -69,16 +69,33 @@ void free_thread_xstate(struct task_struct *tsk)
18427
18428 void free_thread_info(struct thread_info *ti)
18429 {
18430- free_thread_xstate(ti->task);
18431 free_pages((unsigned long)ti, THREAD_ORDER);
18432 }
18433
18434+static struct kmem_cache *task_struct_cachep;
18435+
18436 void arch_task_cache_init(void)
18437 {
18438- task_xstate_cachep =
18439- kmem_cache_create("task_xstate", xstate_size,
18440+ /* create a slab on which task_structs can be allocated */
18441+ task_struct_cachep =
18442+ kmem_cache_create("task_struct", sizeof(struct task_struct),
18443+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18444+
18445+ task_xstate_cachep =
18446+ kmem_cache_create("task_xstate", xstate_size,
18447 __alignof__(union thread_xstate),
18448- SLAB_PANIC | SLAB_NOTRACK, NULL);
18449+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18450+}
18451+
18452+struct task_struct *alloc_task_struct_node(int node)
18453+{
18454+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
18455+}
18456+
18457+void free_task_struct(struct task_struct *task)
18458+{
18459+ free_thread_xstate(task);
18460+ kmem_cache_free(task_struct_cachep, task);
18461 }
18462
18463 /*
18464@@ -91,7 +108,7 @@ void exit_thread(void)
18465 unsigned long *bp = t->io_bitmap_ptr;
18466
18467 if (bp) {
18468- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18469+ struct tss_struct *tss = init_tss + get_cpu();
18470
18471 t->io_bitmap_ptr = NULL;
18472 clear_thread_flag(TIF_IO_BITMAP);
18473@@ -127,7 +144,7 @@ void show_regs_common(void)
18474
18475 printk(KERN_CONT "\n");
18476 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
18477- current->pid, current->comm, print_tainted(),
18478+ task_pid_nr(current), current->comm, print_tainted(),
18479 init_utsname()->release,
18480 (int)strcspn(init_utsname()->version, " "),
18481 init_utsname()->version);
18482@@ -141,6 +158,9 @@ void flush_thread(void)
18483 {
18484 struct task_struct *tsk = current;
18485
18486+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18487+ loadsegment(gs, 0);
18488+#endif
18489 flush_ptrace_hw_breakpoint(tsk);
18490 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
18491 /*
18492@@ -303,10 +323,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18493 regs.di = (unsigned long) arg;
18494
18495 #ifdef CONFIG_X86_32
18496- regs.ds = __USER_DS;
18497- regs.es = __USER_DS;
18498+ regs.ds = __KERNEL_DS;
18499+ regs.es = __KERNEL_DS;
18500 regs.fs = __KERNEL_PERCPU;
18501- regs.gs = __KERNEL_STACK_CANARY;
18502+ savesegment(gs, regs.gs);
18503 #else
18504 regs.ss = __KERNEL_DS;
18505 #endif
18506@@ -392,7 +412,7 @@ static void __exit_idle(void)
18507 void exit_idle(void)
18508 {
18509 /* idle loop has pid 0 */
18510- if (current->pid)
18511+ if (task_pid_nr(current))
18512 return;
18513 __exit_idle();
18514 }
18515@@ -501,7 +521,7 @@ bool set_pm_idle_to_default(void)
18516
18517 return ret;
18518 }
18519-void stop_this_cpu(void *dummy)
18520+__noreturn void stop_this_cpu(void *dummy)
18521 {
18522 local_irq_disable();
18523 /*
18524@@ -743,16 +763,37 @@ static int __init idle_setup(char *str)
18525 }
18526 early_param("idle", idle_setup);
18527
18528-unsigned long arch_align_stack(unsigned long sp)
18529+#ifdef CONFIG_PAX_RANDKSTACK
18530+void pax_randomize_kstack(struct pt_regs *regs)
18531 {
18532- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18533- sp -= get_random_int() % 8192;
18534- return sp & ~0xf;
18535-}
18536+ struct thread_struct *thread = &current->thread;
18537+ unsigned long time;
18538
18539-unsigned long arch_randomize_brk(struct mm_struct *mm)
18540-{
18541- unsigned long range_end = mm->brk + 0x02000000;
18542- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18543-}
18544+ if (!randomize_va_space)
18545+ return;
18546+
18547+ if (v8086_mode(regs))
18548+ return;
18549
18550+ rdtscl(time);
18551+
18552+ /* P4 seems to return a 0 LSB, ignore it */
18553+#ifdef CONFIG_MPENTIUM4
18554+ time &= 0x3EUL;
18555+ time <<= 2;
18556+#elif defined(CONFIG_X86_64)
18557+ time &= 0xFUL;
18558+ time <<= 4;
18559+#else
18560+ time &= 0x1FUL;
18561+ time <<= 3;
18562+#endif
18563+
18564+ thread->sp0 ^= time;
18565+ load_sp0(init_tss + smp_processor_id(), thread);
18566+
18567+#ifdef CONFIG_X86_64
18568+ percpu_write(kernel_stack, thread->sp0);
18569+#endif
18570+}
18571+#endif
18572diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18573index ae68473..7b0bb71 100644
18574--- a/arch/x86/kernel/process_32.c
18575+++ b/arch/x86/kernel/process_32.c
18576@@ -64,6 +64,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18577 unsigned long thread_saved_pc(struct task_struct *tsk)
18578 {
18579 return ((unsigned long *)tsk->thread.sp)[3];
18580+//XXX return tsk->thread.eip;
18581 }
18582
18583 void __show_regs(struct pt_regs *regs, int all)
18584@@ -73,15 +74,14 @@ void __show_regs(struct pt_regs *regs, int all)
18585 unsigned long sp;
18586 unsigned short ss, gs;
18587
18588- if (user_mode_vm(regs)) {
18589+ if (user_mode(regs)) {
18590 sp = regs->sp;
18591 ss = regs->ss & 0xffff;
18592- gs = get_user_gs(regs);
18593 } else {
18594 sp = kernel_stack_pointer(regs);
18595 savesegment(ss, ss);
18596- savesegment(gs, gs);
18597 }
18598+ gs = get_user_gs(regs);
18599
18600 show_regs_common();
18601
18602@@ -143,13 +143,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18603 struct task_struct *tsk;
18604 int err;
18605
18606- childregs = task_pt_regs(p);
18607+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18608 *childregs = *regs;
18609 childregs->ax = 0;
18610 childregs->sp = sp;
18611
18612 p->thread.sp = (unsigned long) childregs;
18613 p->thread.sp0 = (unsigned long) (childregs+1);
18614+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18615
18616 p->thread.ip = (unsigned long) ret_from_fork;
18617
18618@@ -240,7 +241,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18619 struct thread_struct *prev = &prev_p->thread,
18620 *next = &next_p->thread;
18621 int cpu = smp_processor_id();
18622- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18623+ struct tss_struct *tss = init_tss + cpu;
18624 fpu_switch_t fpu;
18625
18626 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18627@@ -264,6 +265,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18628 */
18629 lazy_save_gs(prev->gs);
18630
18631+#ifdef CONFIG_PAX_MEMORY_UDEREF
18632+ __set_fs(task_thread_info(next_p)->addr_limit);
18633+#endif
18634+
18635 /*
18636 * Load the per-thread Thread-Local Storage descriptor.
18637 */
18638@@ -294,6 +299,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18639 */
18640 arch_end_context_switch(next_p);
18641
18642+ percpu_write(current_task, next_p);
18643+ percpu_write(current_tinfo, &next_p->tinfo);
18644+
18645 /*
18646 * Restore %gs if needed (which is common)
18647 */
18648@@ -302,8 +310,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18649
18650 switch_fpu_finish(next_p, fpu);
18651
18652- percpu_write(current_task, next_p);
18653-
18654 return prev_p;
18655 }
18656
18657@@ -333,4 +339,3 @@ unsigned long get_wchan(struct task_struct *p)
18658 } while (count++ < 16);
18659 return 0;
18660 }
18661-
18662diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18663index 43d8b48..c45d566 100644
18664--- a/arch/x86/kernel/process_64.c
18665+++ b/arch/x86/kernel/process_64.c
18666@@ -162,8 +162,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18667 struct pt_regs *childregs;
18668 struct task_struct *me = current;
18669
18670- childregs = ((struct pt_regs *)
18671- (THREAD_SIZE + task_stack_page(p))) - 1;
18672+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18673 *childregs = *regs;
18674
18675 childregs->ax = 0;
18676@@ -175,6 +174,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18677 p->thread.sp = (unsigned long) childregs;
18678 p->thread.sp0 = (unsigned long) (childregs+1);
18679 p->thread.usersp = me->thread.usersp;
18680+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18681
18682 set_tsk_thread_flag(p, TIF_FORK);
18683
18684@@ -280,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18685 struct thread_struct *prev = &prev_p->thread;
18686 struct thread_struct *next = &next_p->thread;
18687 int cpu = smp_processor_id();
18688- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18689+ struct tss_struct *tss = init_tss + cpu;
18690 unsigned fsindex, gsindex;
18691 fpu_switch_t fpu;
18692
18693@@ -362,10 +362,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18694 prev->usersp = percpu_read(old_rsp);
18695 percpu_write(old_rsp, next->usersp);
18696 percpu_write(current_task, next_p);
18697+ percpu_write(current_tinfo, &next_p->tinfo);
18698
18699- percpu_write(kernel_stack,
18700- (unsigned long)task_stack_page(next_p) +
18701- THREAD_SIZE - KERNEL_STACK_OFFSET);
18702+ percpu_write(kernel_stack, next->sp0);
18703
18704 /*
18705 * Now maybe reload the debug registers and handle I/O bitmaps
18706@@ -434,12 +433,11 @@ unsigned long get_wchan(struct task_struct *p)
18707 if (!p || p == current || p->state == TASK_RUNNING)
18708 return 0;
18709 stack = (unsigned long)task_stack_page(p);
18710- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18711+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18712 return 0;
18713 fp = *(u64 *)(p->thread.sp);
18714 do {
18715- if (fp < (unsigned long)stack ||
18716- fp >= (unsigned long)stack+THREAD_SIZE)
18717+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18718 return 0;
18719 ip = *(u64 *)(fp+8);
18720 if (!in_sched_functions(ip))
18721diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18722index cf11783..e7ce551 100644
18723--- a/arch/x86/kernel/ptrace.c
18724+++ b/arch/x86/kernel/ptrace.c
18725@@ -824,7 +824,7 @@ long arch_ptrace(struct task_struct *child, long request,
18726 unsigned long addr, unsigned long data)
18727 {
18728 int ret;
18729- unsigned long __user *datap = (unsigned long __user *)data;
18730+ unsigned long __user *datap = (__force unsigned long __user *)data;
18731
18732 switch (request) {
18733 /* read the word at location addr in the USER area. */
18734@@ -909,14 +909,14 @@ long arch_ptrace(struct task_struct *child, long request,
18735 if ((int) addr < 0)
18736 return -EIO;
18737 ret = do_get_thread_area(child, addr,
18738- (struct user_desc __user *)data);
18739+ (__force struct user_desc __user *) data);
18740 break;
18741
18742 case PTRACE_SET_THREAD_AREA:
18743 if ((int) addr < 0)
18744 return -EIO;
18745 ret = do_set_thread_area(child, addr,
18746- (struct user_desc __user *)data, 0);
18747+ (__force struct user_desc __user *) data, 0);
18748 break;
18749 #endif
18750
18751@@ -1426,7 +1426,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
18752 memset(info, 0, sizeof(*info));
18753 info->si_signo = SIGTRAP;
18754 info->si_code = si_code;
18755- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18756+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18757 }
18758
18759 void user_single_step_siginfo(struct task_struct *tsk,
18760@@ -1455,6 +1455,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18761 # define IS_IA32 0
18762 #endif
18763
18764+#ifdef CONFIG_GRKERNSEC_SETXID
18765+extern void gr_delayed_cred_worker(void);
18766+#endif
18767+
18768 /*
18769 * We must return the syscall number to actually look up in the table.
18770 * This can be -1L to skip running any syscall at all.
18771@@ -1463,6 +1467,11 @@ long syscall_trace_enter(struct pt_regs *regs)
18772 {
18773 long ret = 0;
18774
18775+#ifdef CONFIG_GRKERNSEC_SETXID
18776+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18777+ gr_delayed_cred_worker();
18778+#endif
18779+
18780 /*
18781 * If we stepped into a sysenter/syscall insn, it trapped in
18782 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
18783@@ -1506,6 +1515,11 @@ void syscall_trace_leave(struct pt_regs *regs)
18784 {
18785 bool step;
18786
18787+#ifdef CONFIG_GRKERNSEC_SETXID
18788+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18789+ gr_delayed_cred_worker();
18790+#endif
18791+
18792 audit_syscall_exit(regs);
18793
18794 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
18795diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
18796index 42eb330..139955c 100644
18797--- a/arch/x86/kernel/pvclock.c
18798+++ b/arch/x86/kernel/pvclock.c
18799@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
18800 return pv_tsc_khz;
18801 }
18802
18803-static atomic64_t last_value = ATOMIC64_INIT(0);
18804+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
18805
18806 void pvclock_resume(void)
18807 {
18808- atomic64_set(&last_value, 0);
18809+ atomic64_set_unchecked(&last_value, 0);
18810 }
18811
18812 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18813@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18814 * updating at the same time, and one of them could be slightly behind,
18815 * making the assumption that last_value always go forward fail to hold.
18816 */
18817- last = atomic64_read(&last_value);
18818+ last = atomic64_read_unchecked(&last_value);
18819 do {
18820 if (ret < last)
18821 return last;
18822- last = atomic64_cmpxchg(&last_value, last, ret);
18823+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
18824 } while (unlikely(last != ret));
18825
18826 return ret;
18827diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18828index d840e69..98e9581 100644
18829--- a/arch/x86/kernel/reboot.c
18830+++ b/arch/x86/kernel/reboot.c
18831@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
18832 EXPORT_SYMBOL(pm_power_off);
18833
18834 static const struct desc_ptr no_idt = {};
18835-static int reboot_mode;
18836+static unsigned short reboot_mode;
18837 enum reboot_type reboot_type = BOOT_ACPI;
18838 int reboot_force;
18839
18840@@ -335,13 +335,17 @@ core_initcall(reboot_init);
18841 extern const unsigned char machine_real_restart_asm[];
18842 extern const u64 machine_real_restart_gdt[3];
18843
18844-void machine_real_restart(unsigned int type)
18845+__noreturn void machine_real_restart(unsigned int type)
18846 {
18847 void *restart_va;
18848 unsigned long restart_pa;
18849- void (*restart_lowmem)(unsigned int);
18850+ void (* __noreturn restart_lowmem)(unsigned int);
18851 u64 *lowmem_gdt;
18852
18853+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18854+ struct desc_struct *gdt;
18855+#endif
18856+
18857 local_irq_disable();
18858
18859 /* Write zero to CMOS register number 0x0f, which the BIOS POST
18860@@ -367,14 +371,14 @@ void machine_real_restart(unsigned int type)
18861 boot)". This seems like a fairly standard thing that gets set by
18862 REBOOT.COM programs, and the previous reset routine did this
18863 too. */
18864- *((unsigned short *)0x472) = reboot_mode;
18865+ *(unsigned short *)(__va(0x472)) = reboot_mode;
18866
18867 /* Patch the GDT in the low memory trampoline */
18868 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
18869
18870 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
18871 restart_pa = virt_to_phys(restart_va);
18872- restart_lowmem = (void (*)(unsigned int))restart_pa;
18873+ restart_lowmem = (void *)restart_pa;
18874
18875 /* GDT[0]: GDT self-pointer */
18876 lowmem_gdt[0] =
18877@@ -385,7 +389,33 @@ void machine_real_restart(unsigned int type)
18878 GDT_ENTRY(0x009b, restart_pa, 0xffff);
18879
18880 /* Jump to the identity-mapped low memory code */
18881+
18882+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18883+ gdt = get_cpu_gdt_table(smp_processor_id());
18884+ pax_open_kernel();
18885+#ifdef CONFIG_PAX_MEMORY_UDEREF
18886+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
18887+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
18888+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
18889+#endif
18890+#ifdef CONFIG_PAX_KERNEXEC
18891+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
18892+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
18893+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
18894+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
18895+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
18896+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
18897+#endif
18898+ pax_close_kernel();
18899+#endif
18900+
18901+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18902+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
18903+ unreachable();
18904+#else
18905 restart_lowmem(type);
18906+#endif
18907+
18908 }
18909 #ifdef CONFIG_APM_MODULE
18910 EXPORT_SYMBOL(machine_real_restart);
18911@@ -556,7 +586,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
18912 * try to force a triple fault and then cycle between hitting the keyboard
18913 * controller and doing that
18914 */
18915-static void native_machine_emergency_restart(void)
18916+__noreturn static void native_machine_emergency_restart(void)
18917 {
18918 int i;
18919 int attempt = 0;
18920@@ -680,13 +710,13 @@ void native_machine_shutdown(void)
18921 #endif
18922 }
18923
18924-static void __machine_emergency_restart(int emergency)
18925+static __noreturn void __machine_emergency_restart(int emergency)
18926 {
18927 reboot_emergency = emergency;
18928 machine_ops.emergency_restart();
18929 }
18930
18931-static void native_machine_restart(char *__unused)
18932+static __noreturn void native_machine_restart(char *__unused)
18933 {
18934 printk("machine restart\n");
18935
18936@@ -695,7 +725,7 @@ static void native_machine_restart(char *__unused)
18937 __machine_emergency_restart(0);
18938 }
18939
18940-static void native_machine_halt(void)
18941+static __noreturn void native_machine_halt(void)
18942 {
18943 /* stop other cpus and apics */
18944 machine_shutdown();
18945@@ -706,7 +736,7 @@ static void native_machine_halt(void)
18946 stop_this_cpu(NULL);
18947 }
18948
18949-static void native_machine_power_off(void)
18950+__noreturn static void native_machine_power_off(void)
18951 {
18952 if (pm_power_off) {
18953 if (!reboot_force)
18954@@ -715,6 +745,7 @@ static void native_machine_power_off(void)
18955 }
18956 /* a fallback in case there is no PM info available */
18957 tboot_shutdown(TB_SHUTDOWN_HALT);
18958+ unreachable();
18959 }
18960
18961 struct machine_ops machine_ops = {
18962diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
18963index 7a6f3b3..bed145d7 100644
18964--- a/arch/x86/kernel/relocate_kernel_64.S
18965+++ b/arch/x86/kernel/relocate_kernel_64.S
18966@@ -11,6 +11,7 @@
18967 #include <asm/kexec.h>
18968 #include <asm/processor-flags.h>
18969 #include <asm/pgtable_types.h>
18970+#include <asm/alternative-asm.h>
18971
18972 /*
18973 * Must be relocatable PIC code callable as a C function
18974@@ -160,13 +161,14 @@ identity_mapped:
18975 xorq %rbp, %rbp
18976 xorq %r8, %r8
18977 xorq %r9, %r9
18978- xorq %r10, %r9
18979+ xorq %r10, %r10
18980 xorq %r11, %r11
18981 xorq %r12, %r12
18982 xorq %r13, %r13
18983 xorq %r14, %r14
18984 xorq %r15, %r15
18985
18986+ pax_force_retaddr 0, 1
18987 ret
18988
18989 1:
18990diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
18991index 1a29015..712f324 100644
18992--- a/arch/x86/kernel/setup.c
18993+++ b/arch/x86/kernel/setup.c
18994@@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
18995
18996 switch (data->type) {
18997 case SETUP_E820_EXT:
18998- parse_e820_ext(data);
18999+ parse_e820_ext((struct setup_data __force_kernel *)data);
19000 break;
19001 case SETUP_DTB:
19002 add_dtb(pa_data);
19003@@ -639,7 +639,7 @@ static void __init trim_bios_range(void)
19004 * area (640->1Mb) as ram even though it is not.
19005 * take them out.
19006 */
19007- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
19008+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
19009 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
19010 }
19011
19012@@ -763,14 +763,14 @@ void __init setup_arch(char **cmdline_p)
19013
19014 if (!boot_params.hdr.root_flags)
19015 root_mountflags &= ~MS_RDONLY;
19016- init_mm.start_code = (unsigned long) _text;
19017- init_mm.end_code = (unsigned long) _etext;
19018+ init_mm.start_code = ktla_ktva((unsigned long) _text);
19019+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
19020 init_mm.end_data = (unsigned long) _edata;
19021 init_mm.brk = _brk_end;
19022
19023- code_resource.start = virt_to_phys(_text);
19024- code_resource.end = virt_to_phys(_etext)-1;
19025- data_resource.start = virt_to_phys(_etext);
19026+ code_resource.start = virt_to_phys(ktla_ktva(_text));
19027+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19028+ data_resource.start = virt_to_phys(_sdata);
19029 data_resource.end = virt_to_phys(_edata)-1;
19030 bss_resource.start = virt_to_phys(&__bss_start);
19031 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19032diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19033index 5a98aa2..2f9288d 100644
19034--- a/arch/x86/kernel/setup_percpu.c
19035+++ b/arch/x86/kernel/setup_percpu.c
19036@@ -21,19 +21,17 @@
19037 #include <asm/cpu.h>
19038 #include <asm/stackprotector.h>
19039
19040-DEFINE_PER_CPU(int, cpu_number);
19041+#ifdef CONFIG_SMP
19042+DEFINE_PER_CPU(unsigned int, cpu_number);
19043 EXPORT_PER_CPU_SYMBOL(cpu_number);
19044+#endif
19045
19046-#ifdef CONFIG_X86_64
19047 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19048-#else
19049-#define BOOT_PERCPU_OFFSET 0
19050-#endif
19051
19052 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19053 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19054
19055-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19056+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19057 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19058 };
19059 EXPORT_SYMBOL(__per_cpu_offset);
19060@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
19061 {
19062 #ifdef CONFIG_X86_32
19063 struct desc_struct gdt;
19064+ unsigned long base = per_cpu_offset(cpu);
19065
19066- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19067- 0x2 | DESCTYPE_S, 0x8);
19068- gdt.s = 1;
19069+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19070+ 0x83 | DESCTYPE_S, 0xC);
19071 write_gdt_entry(get_cpu_gdt_table(cpu),
19072 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19073 #endif
19074@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
19075 /* alrighty, percpu areas up and running */
19076 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19077 for_each_possible_cpu(cpu) {
19078+#ifdef CONFIG_CC_STACKPROTECTOR
19079+#ifdef CONFIG_X86_32
19080+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
19081+#endif
19082+#endif
19083 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19084 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19085 per_cpu(cpu_number, cpu) = cpu;
19086@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
19087 */
19088 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
19089 #endif
19090+#ifdef CONFIG_CC_STACKPROTECTOR
19091+#ifdef CONFIG_X86_32
19092+ if (!cpu)
19093+ per_cpu(stack_canary.canary, cpu) = canary;
19094+#endif
19095+#endif
19096 /*
19097 * Up to this point, the boot CPU has been using .init.data
19098 * area. Reload any changed state for the boot CPU.
19099diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19100index 115eac4..c0591d5 100644
19101--- a/arch/x86/kernel/signal.c
19102+++ b/arch/x86/kernel/signal.c
19103@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
19104 * Align the stack pointer according to the i386 ABI,
19105 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19106 */
19107- sp = ((sp + 4) & -16ul) - 4;
19108+ sp = ((sp - 12) & -16ul) - 4;
19109 #else /* !CONFIG_X86_32 */
19110 sp = round_down(sp, 16) - 8;
19111 #endif
19112@@ -241,11 +241,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19113 * Return an always-bogus address instead so we will die with SIGSEGV.
19114 */
19115 if (onsigstack && !likely(on_sig_stack(sp)))
19116- return (void __user *)-1L;
19117+ return (__force void __user *)-1L;
19118
19119 /* save i387 state */
19120 if (used_math() && save_i387_xstate(*fpstate) < 0)
19121- return (void __user *)-1L;
19122+ return (__force void __user *)-1L;
19123
19124 return (void __user *)sp;
19125 }
19126@@ -300,9 +300,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19127 }
19128
19129 if (current->mm->context.vdso)
19130- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19131+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19132 else
19133- restorer = &frame->retcode;
19134+ restorer = (void __user *)&frame->retcode;
19135 if (ka->sa.sa_flags & SA_RESTORER)
19136 restorer = ka->sa.sa_restorer;
19137
19138@@ -316,7 +316,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19139 * reasons and because gdb uses it as a signature to notice
19140 * signal handler stack frames.
19141 */
19142- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19143+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19144
19145 if (err)
19146 return -EFAULT;
19147@@ -370,7 +370,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19148 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19149
19150 /* Set up to return from userspace. */
19151- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19152+ if (current->mm->context.vdso)
19153+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19154+ else
19155+ restorer = (void __user *)&frame->retcode;
19156 if (ka->sa.sa_flags & SA_RESTORER)
19157 restorer = ka->sa.sa_restorer;
19158 put_user_ex(restorer, &frame->pretcode);
19159@@ -382,7 +385,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19160 * reasons and because gdb uses it as a signature to notice
19161 * signal handler stack frames.
19162 */
19163- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19164+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19165 } put_user_catch(err);
19166
19167 if (err)
19168@@ -773,7 +776,7 @@ static void do_signal(struct pt_regs *regs)
19169 * X86_32: vm86 regs switched out by assembly code before reaching
19170 * here, so testing against kernel CS suffices.
19171 */
19172- if (!user_mode(regs))
19173+ if (!user_mode_novm(regs))
19174 return;
19175
19176 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
19177diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19178index 6e1e406..edfb7cb 100644
19179--- a/arch/x86/kernel/smpboot.c
19180+++ b/arch/x86/kernel/smpboot.c
19181@@ -699,17 +699,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19182 set_idle_for_cpu(cpu, c_idle.idle);
19183 do_rest:
19184 per_cpu(current_task, cpu) = c_idle.idle;
19185+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19186 #ifdef CONFIG_X86_32
19187 /* Stack for startup_32 can be just as for start_secondary onwards */
19188 irq_ctx_init(cpu);
19189 #else
19190 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19191 initial_gs = per_cpu_offset(cpu);
19192- per_cpu(kernel_stack, cpu) =
19193- (unsigned long)task_stack_page(c_idle.idle) -
19194- KERNEL_STACK_OFFSET + THREAD_SIZE;
19195+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19196 #endif
19197+
19198+ pax_open_kernel();
19199 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19200+ pax_close_kernel();
19201+
19202 initial_code = (unsigned long)start_secondary;
19203 stack_start = c_idle.idle->thread.sp;
19204
19205@@ -851,6 +854,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19206
19207 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19208
19209+#ifdef CONFIG_PAX_PER_CPU_PGD
19210+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19211+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19212+ KERNEL_PGD_PTRS);
19213+#endif
19214+
19215 err = do_boot_cpu(apicid, cpu);
19216 if (err) {
19217 pr_debug("do_boot_cpu failed %d\n", err);
19218diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19219index c346d11..d43b163 100644
19220--- a/arch/x86/kernel/step.c
19221+++ b/arch/x86/kernel/step.c
19222@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19223 struct desc_struct *desc;
19224 unsigned long base;
19225
19226- seg &= ~7UL;
19227+ seg >>= 3;
19228
19229 mutex_lock(&child->mm->context.lock);
19230- if (unlikely((seg >> 3) >= child->mm->context.size))
19231+ if (unlikely(seg >= child->mm->context.size))
19232 addr = -1L; /* bogus selector, access would fault */
19233 else {
19234 desc = child->mm->context.ldt + seg;
19235@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19236 addr += base;
19237 }
19238 mutex_unlock(&child->mm->context.lock);
19239- }
19240+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19241+ addr = ktla_ktva(addr);
19242
19243 return addr;
19244 }
19245@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19246 unsigned char opcode[15];
19247 unsigned long addr = convert_ip_to_linear(child, regs);
19248
19249+ if (addr == -EINVAL)
19250+ return 0;
19251+
19252 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19253 for (i = 0; i < copied; i++) {
19254 switch (opcode[i]) {
19255diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19256index 0b0cb5f..db6b9ed 100644
19257--- a/arch/x86/kernel/sys_i386_32.c
19258+++ b/arch/x86/kernel/sys_i386_32.c
19259@@ -24,17 +24,224 @@
19260
19261 #include <asm/syscalls.h>
19262
19263-/*
19264- * Do a system call from kernel instead of calling sys_execve so we
19265- * end up with proper pt_regs.
19266- */
19267-int kernel_execve(const char *filename,
19268- const char *const argv[],
19269- const char *const envp[])
19270+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19271 {
19272- long __res;
19273- asm volatile ("int $0x80"
19274- : "=a" (__res)
19275- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
19276- return __res;
19277+ unsigned long pax_task_size = TASK_SIZE;
19278+
19279+#ifdef CONFIG_PAX_SEGMEXEC
19280+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19281+ pax_task_size = SEGMEXEC_TASK_SIZE;
19282+#endif
19283+
19284+ if (len > pax_task_size || addr > pax_task_size - len)
19285+ return -EINVAL;
19286+
19287+ return 0;
19288+}
19289+
19290+unsigned long
19291+arch_get_unmapped_area(struct file *filp, unsigned long addr,
19292+ unsigned long len, unsigned long pgoff, unsigned long flags)
19293+{
19294+ struct mm_struct *mm = current->mm;
19295+ struct vm_area_struct *vma;
19296+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19297+
19298+#ifdef CONFIG_PAX_SEGMEXEC
19299+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19300+ pax_task_size = SEGMEXEC_TASK_SIZE;
19301+#endif
19302+
19303+ pax_task_size -= PAGE_SIZE;
19304+
19305+ if (len > pax_task_size)
19306+ return -ENOMEM;
19307+
19308+ if (flags & MAP_FIXED)
19309+ return addr;
19310+
19311+#ifdef CONFIG_PAX_RANDMMAP
19312+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19313+#endif
19314+
19315+ if (addr) {
19316+ addr = PAGE_ALIGN(addr);
19317+ if (pax_task_size - len >= addr) {
19318+ vma = find_vma(mm, addr);
19319+ if (check_heap_stack_gap(vma, addr, len))
19320+ return addr;
19321+ }
19322+ }
19323+ if (len > mm->cached_hole_size) {
19324+ start_addr = addr = mm->free_area_cache;
19325+ } else {
19326+ start_addr = addr = mm->mmap_base;
19327+ mm->cached_hole_size = 0;
19328+ }
19329+
19330+#ifdef CONFIG_PAX_PAGEEXEC
19331+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19332+ start_addr = 0x00110000UL;
19333+
19334+#ifdef CONFIG_PAX_RANDMMAP
19335+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19336+ start_addr += mm->delta_mmap & 0x03FFF000UL;
19337+#endif
19338+
19339+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19340+ start_addr = addr = mm->mmap_base;
19341+ else
19342+ addr = start_addr;
19343+ }
19344+#endif
19345+
19346+full_search:
19347+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19348+ /* At this point: (!vma || addr < vma->vm_end). */
19349+ if (pax_task_size - len < addr) {
19350+ /*
19351+ * Start a new search - just in case we missed
19352+ * some holes.
19353+ */
19354+ if (start_addr != mm->mmap_base) {
19355+ start_addr = addr = mm->mmap_base;
19356+ mm->cached_hole_size = 0;
19357+ goto full_search;
19358+ }
19359+ return -ENOMEM;
19360+ }
19361+ if (check_heap_stack_gap(vma, addr, len))
19362+ break;
19363+ if (addr + mm->cached_hole_size < vma->vm_start)
19364+ mm->cached_hole_size = vma->vm_start - addr;
19365+ addr = vma->vm_end;
19366+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
19367+ start_addr = addr = mm->mmap_base;
19368+ mm->cached_hole_size = 0;
19369+ goto full_search;
19370+ }
19371+ }
19372+
19373+ /*
19374+ * Remember the place where we stopped the search:
19375+ */
19376+ mm->free_area_cache = addr + len;
19377+ return addr;
19378+}
19379+
19380+unsigned long
19381+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19382+ const unsigned long len, const unsigned long pgoff,
19383+ const unsigned long flags)
19384+{
19385+ struct vm_area_struct *vma;
19386+ struct mm_struct *mm = current->mm;
19387+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19388+
19389+#ifdef CONFIG_PAX_SEGMEXEC
19390+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19391+ pax_task_size = SEGMEXEC_TASK_SIZE;
19392+#endif
19393+
19394+ pax_task_size -= PAGE_SIZE;
19395+
19396+ /* requested length too big for entire address space */
19397+ if (len > pax_task_size)
19398+ return -ENOMEM;
19399+
19400+ if (flags & MAP_FIXED)
19401+ return addr;
19402+
19403+#ifdef CONFIG_PAX_PAGEEXEC
19404+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19405+ goto bottomup;
19406+#endif
19407+
19408+#ifdef CONFIG_PAX_RANDMMAP
19409+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19410+#endif
19411+
19412+ /* requesting a specific address */
19413+ if (addr) {
19414+ addr = PAGE_ALIGN(addr);
19415+ if (pax_task_size - len >= addr) {
19416+ vma = find_vma(mm, addr);
19417+ if (check_heap_stack_gap(vma, addr, len))
19418+ return addr;
19419+ }
19420+ }
19421+
19422+ /* check if free_area_cache is useful for us */
19423+ if (len <= mm->cached_hole_size) {
19424+ mm->cached_hole_size = 0;
19425+ mm->free_area_cache = mm->mmap_base;
19426+ }
19427+
19428+ /* either no address requested or can't fit in requested address hole */
19429+ addr = mm->free_area_cache;
19430+
19431+ /* make sure it can fit in the remaining address space */
19432+ if (addr > len) {
19433+ vma = find_vma(mm, addr-len);
19434+ if (check_heap_stack_gap(vma, addr - len, len))
19435+ /* remember the address as a hint for next time */
19436+ return (mm->free_area_cache = addr-len);
19437+ }
19438+
19439+ if (mm->mmap_base < len)
19440+ goto bottomup;
19441+
19442+ addr = mm->mmap_base-len;
19443+
19444+ do {
19445+ /*
19446+ * Lookup failure means no vma is above this address,
19447+ * else if new region fits below vma->vm_start,
19448+ * return with success:
19449+ */
19450+ vma = find_vma(mm, addr);
19451+ if (check_heap_stack_gap(vma, addr, len))
19452+ /* remember the address as a hint for next time */
19453+ return (mm->free_area_cache = addr);
19454+
19455+ /* remember the largest hole we saw so far */
19456+ if (addr + mm->cached_hole_size < vma->vm_start)
19457+ mm->cached_hole_size = vma->vm_start - addr;
19458+
19459+ /* try just below the current vma->vm_start */
19460+ addr = skip_heap_stack_gap(vma, len);
19461+ } while (!IS_ERR_VALUE(addr));
19462+
19463+bottomup:
19464+ /*
19465+ * A failed mmap() very likely causes application failure,
19466+ * so fall back to the bottom-up function here. This scenario
19467+ * can happen with large stack limits and large mmap()
19468+ * allocations.
19469+ */
19470+
19471+#ifdef CONFIG_PAX_SEGMEXEC
19472+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19473+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19474+ else
19475+#endif
19476+
19477+ mm->mmap_base = TASK_UNMAPPED_BASE;
19478+
19479+#ifdef CONFIG_PAX_RANDMMAP
19480+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19481+ mm->mmap_base += mm->delta_mmap;
19482+#endif
19483+
19484+ mm->free_area_cache = mm->mmap_base;
19485+ mm->cached_hole_size = ~0UL;
19486+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19487+ /*
19488+ * Restore the topdown base:
19489+ */
19490+ mm->mmap_base = base;
19491+ mm->free_area_cache = base;
19492+ mm->cached_hole_size = ~0UL;
19493+
19494+ return addr;
19495 }
19496diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19497index b4d3c39..82bb73b 100644
19498--- a/arch/x86/kernel/sys_x86_64.c
19499+++ b/arch/x86/kernel/sys_x86_64.c
19500@@ -95,8 +95,8 @@ out:
19501 return error;
19502 }
19503
19504-static void find_start_end(unsigned long flags, unsigned long *begin,
19505- unsigned long *end)
19506+static void find_start_end(struct mm_struct *mm, unsigned long flags,
19507+ unsigned long *begin, unsigned long *end)
19508 {
19509 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
19510 unsigned long new_begin;
19511@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19512 *begin = new_begin;
19513 }
19514 } else {
19515- *begin = TASK_UNMAPPED_BASE;
19516+ *begin = mm->mmap_base;
19517 *end = TASK_SIZE;
19518 }
19519 }
19520@@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19521 if (flags & MAP_FIXED)
19522 return addr;
19523
19524- find_start_end(flags, &begin, &end);
19525+ find_start_end(mm, flags, &begin, &end);
19526
19527 if (len > end)
19528 return -ENOMEM;
19529
19530+#ifdef CONFIG_PAX_RANDMMAP
19531+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19532+#endif
19533+
19534 if (addr) {
19535 addr = PAGE_ALIGN(addr);
19536 vma = find_vma(mm, addr);
19537- if (end - len >= addr &&
19538- (!vma || addr + len <= vma->vm_start))
19539+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19540 return addr;
19541 }
19542 if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
19543@@ -172,7 +175,7 @@ full_search:
19544 }
19545 return -ENOMEM;
19546 }
19547- if (!vma || addr + len <= vma->vm_start) {
19548+ if (check_heap_stack_gap(vma, addr, len)) {
19549 /*
19550 * Remember the place where we stopped the search:
19551 */
19552@@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19553 {
19554 struct vm_area_struct *vma;
19555 struct mm_struct *mm = current->mm;
19556- unsigned long addr = addr0, start_addr;
19557+ unsigned long base = mm->mmap_base, addr = addr0, start_addr;
19558
19559 /* requested length too big for entire address space */
19560 if (len > TASK_SIZE)
19561@@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19562 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
19563 goto bottomup;
19564
19565+#ifdef CONFIG_PAX_RANDMMAP
19566+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19567+#endif
19568+
19569 /* requesting a specific address */
19570 if (addr) {
19571 addr = PAGE_ALIGN(addr);
19572- vma = find_vma(mm, addr);
19573- if (TASK_SIZE - len >= addr &&
19574- (!vma || addr + len <= vma->vm_start))
19575- return addr;
19576+ if (TASK_SIZE - len >= addr) {
19577+ vma = find_vma(mm, addr);
19578+ if (check_heap_stack_gap(vma, addr, len))
19579+ return addr;
19580+ }
19581 }
19582
19583 /* check if free_area_cache is useful for us */
19584@@ -240,7 +248,7 @@ try_again:
19585 * return with success:
19586 */
19587 vma = find_vma(mm, addr);
19588- if (!vma || addr+len <= vma->vm_start)
19589+ if (check_heap_stack_gap(vma, addr, len))
19590 /* remember the address as a hint for next time */
19591 return mm->free_area_cache = addr;
19592
19593@@ -249,8 +257,8 @@ try_again:
19594 mm->cached_hole_size = vma->vm_start - addr;
19595
19596 /* try just below the current vma->vm_start */
19597- addr = vma->vm_start-len;
19598- } while (len < vma->vm_start);
19599+ addr = skip_heap_stack_gap(vma, len);
19600+ } while (!IS_ERR_VALUE(addr));
19601
19602 fail:
19603 /*
19604@@ -270,13 +278,21 @@ bottomup:
19605 * can happen with large stack limits and large mmap()
19606 * allocations.
19607 */
19608+ mm->mmap_base = TASK_UNMAPPED_BASE;
19609+
19610+#ifdef CONFIG_PAX_RANDMMAP
19611+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19612+ mm->mmap_base += mm->delta_mmap;
19613+#endif
19614+
19615+ mm->free_area_cache = mm->mmap_base;
19616 mm->cached_hole_size = ~0UL;
19617- mm->free_area_cache = TASK_UNMAPPED_BASE;
19618 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19619 /*
19620 * Restore the topdown base:
19621 */
19622- mm->free_area_cache = mm->mmap_base;
19623+ mm->mmap_base = base;
19624+ mm->free_area_cache = base;
19625 mm->cached_hole_size = ~0UL;
19626
19627 return addr;
19628diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19629index 6410744..79758f0 100644
19630--- a/arch/x86/kernel/tboot.c
19631+++ b/arch/x86/kernel/tboot.c
19632@@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
19633
19634 void tboot_shutdown(u32 shutdown_type)
19635 {
19636- void (*shutdown)(void);
19637+ void (* __noreturn shutdown)(void);
19638
19639 if (!tboot_enabled())
19640 return;
19641@@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
19642
19643 switch_to_tboot_pt();
19644
19645- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19646+ shutdown = (void *)tboot->shutdown_entry;
19647 shutdown();
19648
19649 /* should not reach here */
19650@@ -299,7 +299,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19651 return 0;
19652 }
19653
19654-static atomic_t ap_wfs_count;
19655+static atomic_unchecked_t ap_wfs_count;
19656
19657 static int tboot_wait_for_aps(int num_aps)
19658 {
19659@@ -323,9 +323,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19660 {
19661 switch (action) {
19662 case CPU_DYING:
19663- atomic_inc(&ap_wfs_count);
19664+ atomic_inc_unchecked(&ap_wfs_count);
19665 if (num_online_cpus() == 1)
19666- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19667+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19668 return NOTIFY_BAD;
19669 break;
19670 }
19671@@ -344,7 +344,7 @@ static __init int tboot_late_init(void)
19672
19673 tboot_create_trampoline();
19674
19675- atomic_set(&ap_wfs_count, 0);
19676+ atomic_set_unchecked(&ap_wfs_count, 0);
19677 register_hotcpu_notifier(&tboot_cpu_notifier);
19678
19679 acpi_os_set_prepare_sleep(&tboot_sleep);
19680diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19681index c6eba2b..3303326 100644
19682--- a/arch/x86/kernel/time.c
19683+++ b/arch/x86/kernel/time.c
19684@@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
19685 {
19686 unsigned long pc = instruction_pointer(regs);
19687
19688- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19689+ if (!user_mode(regs) && in_lock_functions(pc)) {
19690 #ifdef CONFIG_FRAME_POINTER
19691- return *(unsigned long *)(regs->bp + sizeof(long));
19692+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19693 #else
19694 unsigned long *sp =
19695 (unsigned long *)kernel_stack_pointer(regs);
19696@@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19697 * or above a saved flags. Eflags has bits 22-31 zero,
19698 * kernel addresses don't.
19699 */
19700+
19701+#ifdef CONFIG_PAX_KERNEXEC
19702+ return ktla_ktva(sp[0]);
19703+#else
19704 if (sp[0] >> 22)
19705 return sp[0];
19706 if (sp[1] >> 22)
19707 return sp[1];
19708 #endif
19709+
19710+#endif
19711 }
19712 return pc;
19713 }
19714diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19715index 9d9d2f9..ed344e4 100644
19716--- a/arch/x86/kernel/tls.c
19717+++ b/arch/x86/kernel/tls.c
19718@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19719 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19720 return -EINVAL;
19721
19722+#ifdef CONFIG_PAX_SEGMEXEC
19723+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19724+ return -EINVAL;
19725+#endif
19726+
19727 set_tls_desc(p, idx, &info, 1);
19728
19729 return 0;
19730diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19731index 451c0a7..e57f551 100644
19732--- a/arch/x86/kernel/trampoline_32.S
19733+++ b/arch/x86/kernel/trampoline_32.S
19734@@ -32,6 +32,12 @@
19735 #include <asm/segment.h>
19736 #include <asm/page_types.h>
19737
19738+#ifdef CONFIG_PAX_KERNEXEC
19739+#define ta(X) (X)
19740+#else
19741+#define ta(X) ((X) - __PAGE_OFFSET)
19742+#endif
19743+
19744 #ifdef CONFIG_SMP
19745
19746 .section ".x86_trampoline","a"
19747@@ -62,7 +68,7 @@ r_base = .
19748 inc %ax # protected mode (PE) bit
19749 lmsw %ax # into protected mode
19750 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19751- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19752+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
19753
19754 # These need to be in the same 64K segment as the above;
19755 # hence we don't use the boot_gdt_descr defined in head.S
19756diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19757index 09ff517..df19fbff 100644
19758--- a/arch/x86/kernel/trampoline_64.S
19759+++ b/arch/x86/kernel/trampoline_64.S
19760@@ -90,7 +90,7 @@ startup_32:
19761 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19762 movl %eax, %ds
19763
19764- movl $X86_CR4_PAE, %eax
19765+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19766 movl %eax, %cr4 # Enable PAE mode
19767
19768 # Setup trampoline 4 level pagetables
19769@@ -138,7 +138,7 @@ tidt:
19770 # so the kernel can live anywhere
19771 .balign 4
19772 tgdt:
19773- .short tgdt_end - tgdt # gdt limit
19774+ .short tgdt_end - tgdt - 1 # gdt limit
19775 .long tgdt - r_base
19776 .short 0
19777 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19778diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19779index ff9281f1..30cb4ac 100644
19780--- a/arch/x86/kernel/traps.c
19781+++ b/arch/x86/kernel/traps.c
19782@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
19783
19784 /* Do we ignore FPU interrupts ? */
19785 char ignore_fpu_irq;
19786-
19787-/*
19788- * The IDT has to be page-aligned to simplify the Pentium
19789- * F0 0F bug workaround.
19790- */
19791-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19792 #endif
19793
19794 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19795@@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19796 }
19797
19798 static void __kprobes
19799-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19800+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19801 long error_code, siginfo_t *info)
19802 {
19803 struct task_struct *tsk = current;
19804
19805 #ifdef CONFIG_X86_32
19806- if (regs->flags & X86_VM_MASK) {
19807+ if (v8086_mode(regs)) {
19808 /*
19809 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19810 * On nmi (interrupt 2), do_trap should not be called.
19811@@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19812 }
19813 #endif
19814
19815- if (!user_mode(regs))
19816+ if (!user_mode_novm(regs))
19817 goto kernel_trap;
19818
19819 #ifdef CONFIG_X86_32
19820@@ -148,7 +142,7 @@ trap_signal:
19821 printk_ratelimit()) {
19822 printk(KERN_INFO
19823 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19824- tsk->comm, tsk->pid, str,
19825+ tsk->comm, task_pid_nr(tsk), str,
19826 regs->ip, regs->sp, error_code);
19827 print_vma_addr(" in ", regs->ip);
19828 printk("\n");
19829@@ -165,8 +159,20 @@ kernel_trap:
19830 if (!fixup_exception(regs)) {
19831 tsk->thread.error_code = error_code;
19832 tsk->thread.trap_nr = trapnr;
19833+
19834+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19835+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19836+ str = "PAX: suspicious stack segment fault";
19837+#endif
19838+
19839 die(str, regs, error_code);
19840 }
19841+
19842+#ifdef CONFIG_PAX_REFCOUNT
19843+ if (trapnr == 4)
19844+ pax_report_refcount_overflow(regs);
19845+#endif
19846+
19847 return;
19848
19849 #ifdef CONFIG_X86_32
19850@@ -259,14 +265,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
19851 conditional_sti(regs);
19852
19853 #ifdef CONFIG_X86_32
19854- if (regs->flags & X86_VM_MASK)
19855+ if (v8086_mode(regs))
19856 goto gp_in_vm86;
19857 #endif
19858
19859 tsk = current;
19860- if (!user_mode(regs))
19861+ if (!user_mode_novm(regs))
19862 goto gp_in_kernel;
19863
19864+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19865+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
19866+ struct mm_struct *mm = tsk->mm;
19867+ unsigned long limit;
19868+
19869+ down_write(&mm->mmap_sem);
19870+ limit = mm->context.user_cs_limit;
19871+ if (limit < TASK_SIZE) {
19872+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
19873+ up_write(&mm->mmap_sem);
19874+ return;
19875+ }
19876+ up_write(&mm->mmap_sem);
19877+ }
19878+#endif
19879+
19880 tsk->thread.error_code = error_code;
19881 tsk->thread.trap_nr = X86_TRAP_GP;
19882
19883@@ -299,6 +321,13 @@ gp_in_kernel:
19884 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
19885 X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
19886 return;
19887+
19888+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19889+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
19890+ die("PAX: suspicious general protection fault", regs, error_code);
19891+ else
19892+#endif
19893+
19894 die("general protection fault", regs, error_code);
19895 }
19896
19897@@ -425,7 +454,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19898 /* It's safe to allow irq's after DR6 has been saved */
19899 preempt_conditional_sti(regs);
19900
19901- if (regs->flags & X86_VM_MASK) {
19902+ if (v8086_mode(regs)) {
19903 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
19904 X86_TRAP_DB);
19905 preempt_conditional_cli(regs);
19906@@ -440,7 +469,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19907 * We already checked v86 mode above, so we can check for kernel mode
19908 * by just checking the CPL of CS.
19909 */
19910- if ((dr6 & DR_STEP) && !user_mode(regs)) {
19911+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
19912 tsk->thread.debugreg6 &= ~DR_STEP;
19913 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
19914 regs->flags &= ~X86_EFLAGS_TF;
19915@@ -471,7 +500,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
19916 return;
19917 conditional_sti(regs);
19918
19919- if (!user_mode_vm(regs))
19920+ if (!user_mode(regs))
19921 {
19922 if (!fixup_exception(regs)) {
19923 task->thread.error_code = error_code;
19924diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
19925index b9242ba..50c5edd 100644
19926--- a/arch/x86/kernel/verify_cpu.S
19927+++ b/arch/x86/kernel/verify_cpu.S
19928@@ -20,6 +20,7 @@
19929 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
19930 * arch/x86/kernel/trampoline_64.S: secondary processor verification
19931 * arch/x86/kernel/head_32.S: processor startup
19932+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
19933 *
19934 * verify_cpu, returns the status of longmode and SSE in register %eax.
19935 * 0: Success 1: Failure
19936diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
19937index 255f58a..5e91150 100644
19938--- a/arch/x86/kernel/vm86_32.c
19939+++ b/arch/x86/kernel/vm86_32.c
19940@@ -41,6 +41,7 @@
19941 #include <linux/ptrace.h>
19942 #include <linux/audit.h>
19943 #include <linux/stddef.h>
19944+#include <linux/grsecurity.h>
19945
19946 #include <asm/uaccess.h>
19947 #include <asm/io.h>
19948@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
19949 do_exit(SIGSEGV);
19950 }
19951
19952- tss = &per_cpu(init_tss, get_cpu());
19953+ tss = init_tss + get_cpu();
19954 current->thread.sp0 = current->thread.saved_sp0;
19955 current->thread.sysenter_cs = __KERNEL_CS;
19956 load_sp0(tss, &current->thread);
19957@@ -210,6 +211,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
19958 struct task_struct *tsk;
19959 int tmp, ret = -EPERM;
19960
19961+#ifdef CONFIG_GRKERNSEC_VM86
19962+ if (!capable(CAP_SYS_RAWIO)) {
19963+ gr_handle_vm86();
19964+ goto out;
19965+ }
19966+#endif
19967+
19968 tsk = current;
19969 if (tsk->thread.saved_sp0)
19970 goto out;
19971@@ -240,6 +248,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
19972 int tmp, ret;
19973 struct vm86plus_struct __user *v86;
19974
19975+#ifdef CONFIG_GRKERNSEC_VM86
19976+ if (!capable(CAP_SYS_RAWIO)) {
19977+ gr_handle_vm86();
19978+ ret = -EPERM;
19979+ goto out;
19980+ }
19981+#endif
19982+
19983 tsk = current;
19984 switch (cmd) {
19985 case VM86_REQUEST_IRQ:
19986@@ -326,7 +342,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
19987 tsk->thread.saved_fs = info->regs32->fs;
19988 tsk->thread.saved_gs = get_user_gs(info->regs32);
19989
19990- tss = &per_cpu(init_tss, get_cpu());
19991+ tss = init_tss + get_cpu();
19992 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
19993 if (cpu_has_sep)
19994 tsk->thread.sysenter_cs = 0;
19995@@ -533,7 +549,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
19996 goto cannot_handle;
19997 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
19998 goto cannot_handle;
19999- intr_ptr = (unsigned long __user *) (i << 2);
20000+ intr_ptr = (__force unsigned long __user *) (i << 2);
20001 if (get_user(segoffs, intr_ptr))
20002 goto cannot_handle;
20003 if ((segoffs >> 16) == BIOSSEG)
20004diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20005index 0f703f1..9e15f64 100644
20006--- a/arch/x86/kernel/vmlinux.lds.S
20007+++ b/arch/x86/kernel/vmlinux.lds.S
20008@@ -26,6 +26,13 @@
20009 #include <asm/page_types.h>
20010 #include <asm/cache.h>
20011 #include <asm/boot.h>
20012+#include <asm/segment.h>
20013+
20014+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20015+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20016+#else
20017+#define __KERNEL_TEXT_OFFSET 0
20018+#endif
20019
20020 #undef i386 /* in case the preprocessor is a 32bit one */
20021
20022@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
20023
20024 PHDRS {
20025 text PT_LOAD FLAGS(5); /* R_E */
20026+#ifdef CONFIG_X86_32
20027+ module PT_LOAD FLAGS(5); /* R_E */
20028+#endif
20029+#ifdef CONFIG_XEN
20030+ rodata PT_LOAD FLAGS(5); /* R_E */
20031+#else
20032+ rodata PT_LOAD FLAGS(4); /* R__ */
20033+#endif
20034 data PT_LOAD FLAGS(6); /* RW_ */
20035-#ifdef CONFIG_X86_64
20036+ init.begin PT_LOAD FLAGS(6); /* RW_ */
20037 #ifdef CONFIG_SMP
20038 percpu PT_LOAD FLAGS(6); /* RW_ */
20039 #endif
20040+ text.init PT_LOAD FLAGS(5); /* R_E */
20041+ text.exit PT_LOAD FLAGS(5); /* R_E */
20042 init PT_LOAD FLAGS(7); /* RWE */
20043-#endif
20044 note PT_NOTE FLAGS(0); /* ___ */
20045 }
20046
20047 SECTIONS
20048 {
20049 #ifdef CONFIG_X86_32
20050- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20051- phys_startup_32 = startup_32 - LOAD_OFFSET;
20052+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20053 #else
20054- . = __START_KERNEL;
20055- phys_startup_64 = startup_64 - LOAD_OFFSET;
20056+ . = __START_KERNEL;
20057 #endif
20058
20059 /* Text and read-only data */
20060- .text : AT(ADDR(.text) - LOAD_OFFSET) {
20061- _text = .;
20062+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20063 /* bootstrapping code */
20064+#ifdef CONFIG_X86_32
20065+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20066+#else
20067+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20068+#endif
20069+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20070+ _text = .;
20071 HEAD_TEXT
20072 #ifdef CONFIG_X86_32
20073 . = ALIGN(PAGE_SIZE);
20074@@ -108,13 +128,47 @@ SECTIONS
20075 IRQENTRY_TEXT
20076 *(.fixup)
20077 *(.gnu.warning)
20078- /* End of text section */
20079- _etext = .;
20080 } :text = 0x9090
20081
20082- NOTES :text :note
20083+ . += __KERNEL_TEXT_OFFSET;
20084
20085- EXCEPTION_TABLE(16) :text = 0x9090
20086+#ifdef CONFIG_X86_32
20087+ . = ALIGN(PAGE_SIZE);
20088+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
20089+
20090+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20091+ MODULES_EXEC_VADDR = .;
20092+ BYTE(0)
20093+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20094+ . = ALIGN(HPAGE_SIZE);
20095+ MODULES_EXEC_END = . - 1;
20096+#endif
20097+
20098+ } :module
20099+#endif
20100+
20101+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20102+ /* End of text section */
20103+ _etext = . - __KERNEL_TEXT_OFFSET;
20104+ }
20105+
20106+#ifdef CONFIG_X86_32
20107+ . = ALIGN(PAGE_SIZE);
20108+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20109+ *(.idt)
20110+ . = ALIGN(PAGE_SIZE);
20111+ *(.empty_zero_page)
20112+ *(.initial_pg_fixmap)
20113+ *(.initial_pg_pmd)
20114+ *(.initial_page_table)
20115+ *(.swapper_pg_dir)
20116+ } :rodata
20117+#endif
20118+
20119+ . = ALIGN(PAGE_SIZE);
20120+ NOTES :rodata :note
20121+
20122+ EXCEPTION_TABLE(16) :rodata
20123
20124 #if defined(CONFIG_DEBUG_RODATA)
20125 /* .text should occupy whole number of pages */
20126@@ -126,16 +180,20 @@ SECTIONS
20127
20128 /* Data */
20129 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20130+
20131+#ifdef CONFIG_PAX_KERNEXEC
20132+ . = ALIGN(HPAGE_SIZE);
20133+#else
20134+ . = ALIGN(PAGE_SIZE);
20135+#endif
20136+
20137 /* Start of data section */
20138 _sdata = .;
20139
20140 /* init_task */
20141 INIT_TASK_DATA(THREAD_SIZE)
20142
20143-#ifdef CONFIG_X86_32
20144- /* 32 bit has nosave before _edata */
20145 NOSAVE_DATA
20146-#endif
20147
20148 PAGE_ALIGNED_DATA(PAGE_SIZE)
20149
20150@@ -176,12 +234,19 @@ SECTIONS
20151 #endif /* CONFIG_X86_64 */
20152
20153 /* Init code and data - will be freed after init */
20154- . = ALIGN(PAGE_SIZE);
20155 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20156+ BYTE(0)
20157+
20158+#ifdef CONFIG_PAX_KERNEXEC
20159+ . = ALIGN(HPAGE_SIZE);
20160+#else
20161+ . = ALIGN(PAGE_SIZE);
20162+#endif
20163+
20164 __init_begin = .; /* paired with __init_end */
20165- }
20166+ } :init.begin
20167
20168-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20169+#ifdef CONFIG_SMP
20170 /*
20171 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20172 * output PHDR, so the next output section - .init.text - should
20173@@ -190,12 +255,27 @@ SECTIONS
20174 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
20175 #endif
20176
20177- INIT_TEXT_SECTION(PAGE_SIZE)
20178-#ifdef CONFIG_X86_64
20179- :init
20180-#endif
20181+ . = ALIGN(PAGE_SIZE);
20182+ init_begin = .;
20183+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20184+ VMLINUX_SYMBOL(_sinittext) = .;
20185+ INIT_TEXT
20186+ VMLINUX_SYMBOL(_einittext) = .;
20187+ . = ALIGN(PAGE_SIZE);
20188+ } :text.init
20189
20190- INIT_DATA_SECTION(16)
20191+ /*
20192+ * .exit.text is discard at runtime, not link time, to deal with
20193+ * references from .altinstructions and .eh_frame
20194+ */
20195+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20196+ EXIT_TEXT
20197+ . = ALIGN(16);
20198+ } :text.exit
20199+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20200+
20201+ . = ALIGN(PAGE_SIZE);
20202+ INIT_DATA_SECTION(16) :init
20203
20204 /*
20205 * Code and data for a variety of lowlevel trampolines, to be
20206@@ -269,19 +349,12 @@ SECTIONS
20207 }
20208
20209 . = ALIGN(8);
20210- /*
20211- * .exit.text is discard at runtime, not link time, to deal with
20212- * references from .altinstructions and .eh_frame
20213- */
20214- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20215- EXIT_TEXT
20216- }
20217
20218 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20219 EXIT_DATA
20220 }
20221
20222-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20223+#ifndef CONFIG_SMP
20224 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
20225 #endif
20226
20227@@ -300,16 +373,10 @@ SECTIONS
20228 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
20229 __smp_locks = .;
20230 *(.smp_locks)
20231- . = ALIGN(PAGE_SIZE);
20232 __smp_locks_end = .;
20233+ . = ALIGN(PAGE_SIZE);
20234 }
20235
20236-#ifdef CONFIG_X86_64
20237- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20238- NOSAVE_DATA
20239- }
20240-#endif
20241-
20242 /* BSS */
20243 . = ALIGN(PAGE_SIZE);
20244 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20245@@ -325,6 +392,7 @@ SECTIONS
20246 __brk_base = .;
20247 . += 64 * 1024; /* 64k alignment slop space */
20248 *(.brk_reservation) /* areas brk users have reserved */
20249+ . = ALIGN(HPAGE_SIZE);
20250 __brk_limit = .;
20251 }
20252
20253@@ -351,13 +419,12 @@ SECTIONS
20254 * for the boot processor.
20255 */
20256 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
20257-INIT_PER_CPU(gdt_page);
20258 INIT_PER_CPU(irq_stack_union);
20259
20260 /*
20261 * Build-time check on the image size:
20262 */
20263-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20264+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20265 "kernel image bigger than KERNEL_IMAGE_SIZE");
20266
20267 #ifdef CONFIG_SMP
20268diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20269index 7515cf0..331a1a0 100644
20270--- a/arch/x86/kernel/vsyscall_64.c
20271+++ b/arch/x86/kernel/vsyscall_64.c
20272@@ -54,15 +54,13 @@
20273 DEFINE_VVAR(int, vgetcpu_mode);
20274 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
20275
20276-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
20277+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
20278
20279 static int __init vsyscall_setup(char *str)
20280 {
20281 if (str) {
20282 if (!strcmp("emulate", str))
20283 vsyscall_mode = EMULATE;
20284- else if (!strcmp("native", str))
20285- vsyscall_mode = NATIVE;
20286 else if (!strcmp("none", str))
20287 vsyscall_mode = NONE;
20288 else
20289@@ -206,7 +204,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20290
20291 tsk = current;
20292 if (seccomp_mode(&tsk->seccomp))
20293- do_exit(SIGKILL);
20294+ do_group_exit(SIGKILL);
20295
20296 /*
20297 * With a real vsyscall, page faults cause SIGSEGV. We want to
20298@@ -278,8 +276,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20299 return true;
20300
20301 sigsegv:
20302- force_sig(SIGSEGV, current);
20303- return true;
20304+ do_group_exit(SIGKILL);
20305 }
20306
20307 /*
20308@@ -332,10 +329,7 @@ void __init map_vsyscall(void)
20309 extern char __vvar_page;
20310 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
20311
20312- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
20313- vsyscall_mode == NATIVE
20314- ? PAGE_KERNEL_VSYSCALL
20315- : PAGE_KERNEL_VVAR);
20316+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
20317 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
20318 (unsigned long)VSYSCALL_START);
20319
20320diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
20321index 9796c2f..f686fbf 100644
20322--- a/arch/x86/kernel/x8664_ksyms_64.c
20323+++ b/arch/x86/kernel/x8664_ksyms_64.c
20324@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
20325 EXPORT_SYMBOL(copy_user_generic_string);
20326 EXPORT_SYMBOL(copy_user_generic_unrolled);
20327 EXPORT_SYMBOL(__copy_user_nocache);
20328-EXPORT_SYMBOL(_copy_from_user);
20329-EXPORT_SYMBOL(_copy_to_user);
20330
20331 EXPORT_SYMBOL(copy_page);
20332 EXPORT_SYMBOL(clear_page);
20333diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
20334index e62728e..5fc3a07 100644
20335--- a/arch/x86/kernel/xsave.c
20336+++ b/arch/x86/kernel/xsave.c
20337@@ -131,7 +131,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
20338 fx_sw_user->xstate_size > fx_sw_user->extended_size)
20339 return -EINVAL;
20340
20341- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
20342+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
20343 fx_sw_user->extended_size -
20344 FP_XSTATE_MAGIC2_SIZE));
20345 if (err)
20346@@ -267,7 +267,7 @@ fx_only:
20347 * the other extended state.
20348 */
20349 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
20350- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
20351+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
20352 }
20353
20354 /*
20355@@ -296,7 +296,7 @@ int restore_i387_xstate(void __user *buf)
20356 if (use_xsave())
20357 err = restore_user_xstate(buf);
20358 else
20359- err = fxrstor_checking((__force struct i387_fxsave_struct *)
20360+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
20361 buf);
20362 if (unlikely(err)) {
20363 /*
20364diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
20365index 9fed5be..18fd595 100644
20366--- a/arch/x86/kvm/cpuid.c
20367+++ b/arch/x86/kvm/cpuid.c
20368@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
20369 struct kvm_cpuid2 *cpuid,
20370 struct kvm_cpuid_entry2 __user *entries)
20371 {
20372- int r;
20373+ int r, i;
20374
20375 r = -E2BIG;
20376 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
20377 goto out;
20378 r = -EFAULT;
20379- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
20380- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20381+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20382 goto out;
20383+ for (i = 0; i < cpuid->nent; ++i) {
20384+ struct kvm_cpuid_entry2 cpuid_entry;
20385+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
20386+ goto out;
20387+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
20388+ }
20389 vcpu->arch.cpuid_nent = cpuid->nent;
20390 kvm_apic_set_version(vcpu);
20391 kvm_x86_ops->cpuid_update(vcpu);
20392@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
20393 struct kvm_cpuid2 *cpuid,
20394 struct kvm_cpuid_entry2 __user *entries)
20395 {
20396- int r;
20397+ int r, i;
20398
20399 r = -E2BIG;
20400 if (cpuid->nent < vcpu->arch.cpuid_nent)
20401 goto out;
20402 r = -EFAULT;
20403- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
20404- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20405+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20406 goto out;
20407+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
20408+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
20409+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
20410+ goto out;
20411+ }
20412 return 0;
20413
20414 out:
20415diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
20416index 8375622..b7bca1a 100644
20417--- a/arch/x86/kvm/emulate.c
20418+++ b/arch/x86/kvm/emulate.c
20419@@ -252,6 +252,7 @@ struct gprefix {
20420
20421 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
20422 do { \
20423+ unsigned long _tmp; \
20424 __asm__ __volatile__ ( \
20425 _PRE_EFLAGS("0", "4", "2") \
20426 _op _suffix " %"_x"3,%1; " \
20427@@ -266,8 +267,6 @@ struct gprefix {
20428 /* Raw emulation: instruction has two explicit operands. */
20429 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
20430 do { \
20431- unsigned long _tmp; \
20432- \
20433 switch ((ctxt)->dst.bytes) { \
20434 case 2: \
20435 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
20436@@ -283,7 +282,6 @@ struct gprefix {
20437
20438 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
20439 do { \
20440- unsigned long _tmp; \
20441 switch ((ctxt)->dst.bytes) { \
20442 case 1: \
20443 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
20444diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
20445index 8584322..17d5955 100644
20446--- a/arch/x86/kvm/lapic.c
20447+++ b/arch/x86/kvm/lapic.c
20448@@ -54,7 +54,7 @@
20449 #define APIC_BUS_CYCLE_NS 1
20450
20451 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
20452-#define apic_debug(fmt, arg...)
20453+#define apic_debug(fmt, arg...) do {} while (0)
20454
20455 #define APIC_LVT_NUM 6
20456 /* 14 is the version for Xeon and Pentium 8.4.8*/
20457diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
20458index df5a703..63748a7 100644
20459--- a/arch/x86/kvm/paging_tmpl.h
20460+++ b/arch/x86/kvm/paging_tmpl.h
20461@@ -197,7 +197,7 @@ retry_walk:
20462 if (unlikely(kvm_is_error_hva(host_addr)))
20463 goto error;
20464
20465- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
20466+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
20467 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
20468 goto error;
20469
20470diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
20471index e334389..6839087 100644
20472--- a/arch/x86/kvm/svm.c
20473+++ b/arch/x86/kvm/svm.c
20474@@ -3509,7 +3509,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
20475 int cpu = raw_smp_processor_id();
20476
20477 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
20478+
20479+ pax_open_kernel();
20480 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
20481+ pax_close_kernel();
20482+
20483 load_TR_desc();
20484 }
20485
20486@@ -3887,6 +3891,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
20487 #endif
20488 #endif
20489
20490+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20491+ __set_fs(current_thread_info()->addr_limit);
20492+#endif
20493+
20494 reload_tss(vcpu);
20495
20496 local_irq_disable();
20497diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
20498index 4ff0ab9..2ff68d3 100644
20499--- a/arch/x86/kvm/vmx.c
20500+++ b/arch/x86/kvm/vmx.c
20501@@ -1303,7 +1303,11 @@ static void reload_tss(void)
20502 struct desc_struct *descs;
20503
20504 descs = (void *)gdt->address;
20505+
20506+ pax_open_kernel();
20507 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
20508+ pax_close_kernel();
20509+
20510 load_TR_desc();
20511 }
20512
20513@@ -2625,8 +2629,11 @@ static __init int hardware_setup(void)
20514 if (!cpu_has_vmx_flexpriority())
20515 flexpriority_enabled = 0;
20516
20517- if (!cpu_has_vmx_tpr_shadow())
20518- kvm_x86_ops->update_cr8_intercept = NULL;
20519+ if (!cpu_has_vmx_tpr_shadow()) {
20520+ pax_open_kernel();
20521+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
20522+ pax_close_kernel();
20523+ }
20524
20525 if (enable_ept && !cpu_has_vmx_ept_2m_page())
20526 kvm_disable_largepages();
20527@@ -3642,7 +3649,7 @@ static void vmx_set_constant_host_state(void)
20528 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
20529
20530 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
20531- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
20532+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
20533
20534 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
20535 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
20536@@ -6180,6 +6187,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20537 "jmp .Lkvm_vmx_return \n\t"
20538 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
20539 ".Lkvm_vmx_return: "
20540+
20541+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20542+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
20543+ ".Lkvm_vmx_return2: "
20544+#endif
20545+
20546 /* Save guest registers, load host registers, keep flags */
20547 "mov %0, %c[wordsize](%%"R"sp) \n\t"
20548 "pop %0 \n\t"
20549@@ -6228,6 +6241,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20550 #endif
20551 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
20552 [wordsize]"i"(sizeof(ulong))
20553+
20554+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20555+ ,[cs]"i"(__KERNEL_CS)
20556+#endif
20557+
20558 : "cc", "memory"
20559 , R"ax", R"bx", R"di", R"si"
20560 #ifdef CONFIG_X86_64
20561@@ -6256,7 +6274,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20562 }
20563 }
20564
20565- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
20566+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
20567+
20568+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20569+ loadsegment(fs, __KERNEL_PERCPU);
20570+#endif
20571+
20572+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20573+ __set_fs(current_thread_info()->addr_limit);
20574+#endif
20575+
20576 vmx->loaded_vmcs->launched = 1;
20577
20578 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
20579diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
20580index 185a2b8..866d2a6 100644
20581--- a/arch/x86/kvm/x86.c
20582+++ b/arch/x86/kvm/x86.c
20583@@ -1357,8 +1357,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
20584 {
20585 struct kvm *kvm = vcpu->kvm;
20586 int lm = is_long_mode(vcpu);
20587- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20588- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20589+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20590+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20591 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
20592 : kvm->arch.xen_hvm_config.blob_size_32;
20593 u32 page_num = data & ~PAGE_MASK;
20594@@ -2213,6 +2213,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
20595 if (n < msr_list.nmsrs)
20596 goto out;
20597 r = -EFAULT;
20598+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
20599+ goto out;
20600 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
20601 num_msrs_to_save * sizeof(u32)))
20602 goto out;
20603@@ -2338,7 +2340,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
20604 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
20605 struct kvm_interrupt *irq)
20606 {
20607- if (irq->irq < 0 || irq->irq >= 256)
20608+ if (irq->irq >= 256)
20609 return -EINVAL;
20610 if (irqchip_in_kernel(vcpu->kvm))
20611 return -ENXIO;
20612@@ -4860,7 +4862,7 @@ static void kvm_set_mmio_spte_mask(void)
20613 kvm_mmu_set_mmio_spte_mask(mask);
20614 }
20615
20616-int kvm_arch_init(void *opaque)
20617+int kvm_arch_init(const void *opaque)
20618 {
20619 int r;
20620 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
20621diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
20622index 642d880..44e0f3f 100644
20623--- a/arch/x86/lguest/boot.c
20624+++ b/arch/x86/lguest/boot.c
20625@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
20626 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
20627 * Launcher to reboot us.
20628 */
20629-static void lguest_restart(char *reason)
20630+static __noreturn void lguest_restart(char *reason)
20631 {
20632 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
20633+ BUG();
20634 }
20635
20636 /*G:050
20637diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
20638index 00933d5..3a64af9 100644
20639--- a/arch/x86/lib/atomic64_386_32.S
20640+++ b/arch/x86/lib/atomic64_386_32.S
20641@@ -48,6 +48,10 @@ BEGIN(read)
20642 movl (v), %eax
20643 movl 4(v), %edx
20644 RET_ENDP
20645+BEGIN(read_unchecked)
20646+ movl (v), %eax
20647+ movl 4(v), %edx
20648+RET_ENDP
20649 #undef v
20650
20651 #define v %esi
20652@@ -55,6 +59,10 @@ BEGIN(set)
20653 movl %ebx, (v)
20654 movl %ecx, 4(v)
20655 RET_ENDP
20656+BEGIN(set_unchecked)
20657+ movl %ebx, (v)
20658+ movl %ecx, 4(v)
20659+RET_ENDP
20660 #undef v
20661
20662 #define v %esi
20663@@ -70,6 +78,20 @@ RET_ENDP
20664 BEGIN(add)
20665 addl %eax, (v)
20666 adcl %edx, 4(v)
20667+
20668+#ifdef CONFIG_PAX_REFCOUNT
20669+ jno 0f
20670+ subl %eax, (v)
20671+ sbbl %edx, 4(v)
20672+ int $4
20673+0:
20674+ _ASM_EXTABLE(0b, 0b)
20675+#endif
20676+
20677+RET_ENDP
20678+BEGIN(add_unchecked)
20679+ addl %eax, (v)
20680+ adcl %edx, 4(v)
20681 RET_ENDP
20682 #undef v
20683
20684@@ -77,6 +99,24 @@ RET_ENDP
20685 BEGIN(add_return)
20686 addl (v), %eax
20687 adcl 4(v), %edx
20688+
20689+#ifdef CONFIG_PAX_REFCOUNT
20690+ into
20691+1234:
20692+ _ASM_EXTABLE(1234b, 2f)
20693+#endif
20694+
20695+ movl %eax, (v)
20696+ movl %edx, 4(v)
20697+
20698+#ifdef CONFIG_PAX_REFCOUNT
20699+2:
20700+#endif
20701+
20702+RET_ENDP
20703+BEGIN(add_return_unchecked)
20704+ addl (v), %eax
20705+ adcl 4(v), %edx
20706 movl %eax, (v)
20707 movl %edx, 4(v)
20708 RET_ENDP
20709@@ -86,6 +126,20 @@ RET_ENDP
20710 BEGIN(sub)
20711 subl %eax, (v)
20712 sbbl %edx, 4(v)
20713+
20714+#ifdef CONFIG_PAX_REFCOUNT
20715+ jno 0f
20716+ addl %eax, (v)
20717+ adcl %edx, 4(v)
20718+ int $4
20719+0:
20720+ _ASM_EXTABLE(0b, 0b)
20721+#endif
20722+
20723+RET_ENDP
20724+BEGIN(sub_unchecked)
20725+ subl %eax, (v)
20726+ sbbl %edx, 4(v)
20727 RET_ENDP
20728 #undef v
20729
20730@@ -96,6 +150,27 @@ BEGIN(sub_return)
20731 sbbl $0, %edx
20732 addl (v), %eax
20733 adcl 4(v), %edx
20734+
20735+#ifdef CONFIG_PAX_REFCOUNT
20736+ into
20737+1234:
20738+ _ASM_EXTABLE(1234b, 2f)
20739+#endif
20740+
20741+ movl %eax, (v)
20742+ movl %edx, 4(v)
20743+
20744+#ifdef CONFIG_PAX_REFCOUNT
20745+2:
20746+#endif
20747+
20748+RET_ENDP
20749+BEGIN(sub_return_unchecked)
20750+ negl %edx
20751+ negl %eax
20752+ sbbl $0, %edx
20753+ addl (v), %eax
20754+ adcl 4(v), %edx
20755 movl %eax, (v)
20756 movl %edx, 4(v)
20757 RET_ENDP
20758@@ -105,6 +180,20 @@ RET_ENDP
20759 BEGIN(inc)
20760 addl $1, (v)
20761 adcl $0, 4(v)
20762+
20763+#ifdef CONFIG_PAX_REFCOUNT
20764+ jno 0f
20765+ subl $1, (v)
20766+ sbbl $0, 4(v)
20767+ int $4
20768+0:
20769+ _ASM_EXTABLE(0b, 0b)
20770+#endif
20771+
20772+RET_ENDP
20773+BEGIN(inc_unchecked)
20774+ addl $1, (v)
20775+ adcl $0, 4(v)
20776 RET_ENDP
20777 #undef v
20778
20779@@ -114,6 +203,26 @@ BEGIN(inc_return)
20780 movl 4(v), %edx
20781 addl $1, %eax
20782 adcl $0, %edx
20783+
20784+#ifdef CONFIG_PAX_REFCOUNT
20785+ into
20786+1234:
20787+ _ASM_EXTABLE(1234b, 2f)
20788+#endif
20789+
20790+ movl %eax, (v)
20791+ movl %edx, 4(v)
20792+
20793+#ifdef CONFIG_PAX_REFCOUNT
20794+2:
20795+#endif
20796+
20797+RET_ENDP
20798+BEGIN(inc_return_unchecked)
20799+ movl (v), %eax
20800+ movl 4(v), %edx
20801+ addl $1, %eax
20802+ adcl $0, %edx
20803 movl %eax, (v)
20804 movl %edx, 4(v)
20805 RET_ENDP
20806@@ -123,6 +232,20 @@ RET_ENDP
20807 BEGIN(dec)
20808 subl $1, (v)
20809 sbbl $0, 4(v)
20810+
20811+#ifdef CONFIG_PAX_REFCOUNT
20812+ jno 0f
20813+ addl $1, (v)
20814+ adcl $0, 4(v)
20815+ int $4
20816+0:
20817+ _ASM_EXTABLE(0b, 0b)
20818+#endif
20819+
20820+RET_ENDP
20821+BEGIN(dec_unchecked)
20822+ subl $1, (v)
20823+ sbbl $0, 4(v)
20824 RET_ENDP
20825 #undef v
20826
20827@@ -132,6 +255,26 @@ BEGIN(dec_return)
20828 movl 4(v), %edx
20829 subl $1, %eax
20830 sbbl $0, %edx
20831+
20832+#ifdef CONFIG_PAX_REFCOUNT
20833+ into
20834+1234:
20835+ _ASM_EXTABLE(1234b, 2f)
20836+#endif
20837+
20838+ movl %eax, (v)
20839+ movl %edx, 4(v)
20840+
20841+#ifdef CONFIG_PAX_REFCOUNT
20842+2:
20843+#endif
20844+
20845+RET_ENDP
20846+BEGIN(dec_return_unchecked)
20847+ movl (v), %eax
20848+ movl 4(v), %edx
20849+ subl $1, %eax
20850+ sbbl $0, %edx
20851 movl %eax, (v)
20852 movl %edx, 4(v)
20853 RET_ENDP
20854@@ -143,6 +286,13 @@ BEGIN(add_unless)
20855 adcl %edx, %edi
20856 addl (v), %eax
20857 adcl 4(v), %edx
20858+
20859+#ifdef CONFIG_PAX_REFCOUNT
20860+ into
20861+1234:
20862+ _ASM_EXTABLE(1234b, 2f)
20863+#endif
20864+
20865 cmpl %eax, %ecx
20866 je 3f
20867 1:
20868@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
20869 1:
20870 addl $1, %eax
20871 adcl $0, %edx
20872+
20873+#ifdef CONFIG_PAX_REFCOUNT
20874+ into
20875+1234:
20876+ _ASM_EXTABLE(1234b, 2f)
20877+#endif
20878+
20879 movl %eax, (v)
20880 movl %edx, 4(v)
20881 movl $1, %eax
20882@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
20883 movl 4(v), %edx
20884 subl $1, %eax
20885 sbbl $0, %edx
20886+
20887+#ifdef CONFIG_PAX_REFCOUNT
20888+ into
20889+1234:
20890+ _ASM_EXTABLE(1234b, 1f)
20891+#endif
20892+
20893 js 1f
20894 movl %eax, (v)
20895 movl %edx, 4(v)
20896diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
20897index f5cc9eb..51fa319 100644
20898--- a/arch/x86/lib/atomic64_cx8_32.S
20899+++ b/arch/x86/lib/atomic64_cx8_32.S
20900@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
20901 CFI_STARTPROC
20902
20903 read64 %ecx
20904+ pax_force_retaddr
20905 ret
20906 CFI_ENDPROC
20907 ENDPROC(atomic64_read_cx8)
20908
20909+ENTRY(atomic64_read_unchecked_cx8)
20910+ CFI_STARTPROC
20911+
20912+ read64 %ecx
20913+ pax_force_retaddr
20914+ ret
20915+ CFI_ENDPROC
20916+ENDPROC(atomic64_read_unchecked_cx8)
20917+
20918 ENTRY(atomic64_set_cx8)
20919 CFI_STARTPROC
20920
20921@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
20922 cmpxchg8b (%esi)
20923 jne 1b
20924
20925+ pax_force_retaddr
20926 ret
20927 CFI_ENDPROC
20928 ENDPROC(atomic64_set_cx8)
20929
20930+ENTRY(atomic64_set_unchecked_cx8)
20931+ CFI_STARTPROC
20932+
20933+1:
20934+/* we don't need LOCK_PREFIX since aligned 64-bit writes
20935+ * are atomic on 586 and newer */
20936+ cmpxchg8b (%esi)
20937+ jne 1b
20938+
20939+ pax_force_retaddr
20940+ ret
20941+ CFI_ENDPROC
20942+ENDPROC(atomic64_set_unchecked_cx8)
20943+
20944 ENTRY(atomic64_xchg_cx8)
20945 CFI_STARTPROC
20946
20947@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
20948 cmpxchg8b (%esi)
20949 jne 1b
20950
20951+ pax_force_retaddr
20952 ret
20953 CFI_ENDPROC
20954 ENDPROC(atomic64_xchg_cx8)
20955
20956-.macro addsub_return func ins insc
20957-ENTRY(atomic64_\func\()_return_cx8)
20958+.macro addsub_return func ins insc unchecked=""
20959+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20960 CFI_STARTPROC
20961 SAVE ebp
20962 SAVE ebx
20963@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
20964 movl %edx, %ecx
20965 \ins\()l %esi, %ebx
20966 \insc\()l %edi, %ecx
20967+
20968+.ifb \unchecked
20969+#ifdef CONFIG_PAX_REFCOUNT
20970+ into
20971+2:
20972+ _ASM_EXTABLE(2b, 3f)
20973+#endif
20974+.endif
20975+
20976 LOCK_PREFIX
20977 cmpxchg8b (%ebp)
20978 jne 1b
20979-
20980-10:
20981 movl %ebx, %eax
20982 movl %ecx, %edx
20983+
20984+.ifb \unchecked
20985+#ifdef CONFIG_PAX_REFCOUNT
20986+3:
20987+#endif
20988+.endif
20989+
20990 RESTORE edi
20991 RESTORE esi
20992 RESTORE ebx
20993 RESTORE ebp
20994+ pax_force_retaddr
20995 ret
20996 CFI_ENDPROC
20997-ENDPROC(atomic64_\func\()_return_cx8)
20998+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
20999 .endm
21000
21001 addsub_return add add adc
21002 addsub_return sub sub sbb
21003+addsub_return add add adc _unchecked
21004+addsub_return sub sub sbb _unchecked
21005
21006-.macro incdec_return func ins insc
21007-ENTRY(atomic64_\func\()_return_cx8)
21008+.macro incdec_return func ins insc unchecked=""
21009+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
21010 CFI_STARTPROC
21011 SAVE ebx
21012
21013@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
21014 movl %edx, %ecx
21015 \ins\()l $1, %ebx
21016 \insc\()l $0, %ecx
21017+
21018+.ifb \unchecked
21019+#ifdef CONFIG_PAX_REFCOUNT
21020+ into
21021+2:
21022+ _ASM_EXTABLE(2b, 3f)
21023+#endif
21024+.endif
21025+
21026 LOCK_PREFIX
21027 cmpxchg8b (%esi)
21028 jne 1b
21029
21030-10:
21031 movl %ebx, %eax
21032 movl %ecx, %edx
21033+
21034+.ifb \unchecked
21035+#ifdef CONFIG_PAX_REFCOUNT
21036+3:
21037+#endif
21038+.endif
21039+
21040 RESTORE ebx
21041+ pax_force_retaddr
21042 ret
21043 CFI_ENDPROC
21044-ENDPROC(atomic64_\func\()_return_cx8)
21045+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21046 .endm
21047
21048 incdec_return inc add adc
21049 incdec_return dec sub sbb
21050+incdec_return inc add adc _unchecked
21051+incdec_return dec sub sbb _unchecked
21052
21053 ENTRY(atomic64_dec_if_positive_cx8)
21054 CFI_STARTPROC
21055@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
21056 movl %edx, %ecx
21057 subl $1, %ebx
21058 sbb $0, %ecx
21059+
21060+#ifdef CONFIG_PAX_REFCOUNT
21061+ into
21062+1234:
21063+ _ASM_EXTABLE(1234b, 2f)
21064+#endif
21065+
21066 js 2f
21067 LOCK_PREFIX
21068 cmpxchg8b (%esi)
21069@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
21070 movl %ebx, %eax
21071 movl %ecx, %edx
21072 RESTORE ebx
21073+ pax_force_retaddr
21074 ret
21075 CFI_ENDPROC
21076 ENDPROC(atomic64_dec_if_positive_cx8)
21077@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
21078 movl %edx, %ecx
21079 addl %ebp, %ebx
21080 adcl %edi, %ecx
21081+
21082+#ifdef CONFIG_PAX_REFCOUNT
21083+ into
21084+1234:
21085+ _ASM_EXTABLE(1234b, 3f)
21086+#endif
21087+
21088 LOCK_PREFIX
21089 cmpxchg8b (%esi)
21090 jne 1b
21091@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
21092 CFI_ADJUST_CFA_OFFSET -8
21093 RESTORE ebx
21094 RESTORE ebp
21095+ pax_force_retaddr
21096 ret
21097 4:
21098 cmpl %edx, 4(%esp)
21099@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
21100 xorl %ecx, %ecx
21101 addl $1, %ebx
21102 adcl %edx, %ecx
21103+
21104+#ifdef CONFIG_PAX_REFCOUNT
21105+ into
21106+1234:
21107+ _ASM_EXTABLE(1234b, 3f)
21108+#endif
21109+
21110 LOCK_PREFIX
21111 cmpxchg8b (%esi)
21112 jne 1b
21113@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
21114 movl $1, %eax
21115 3:
21116 RESTORE ebx
21117+ pax_force_retaddr
21118 ret
21119 CFI_ENDPROC
21120 ENDPROC(atomic64_inc_not_zero_cx8)
21121diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21122index 78d16a5..fbcf666 100644
21123--- a/arch/x86/lib/checksum_32.S
21124+++ b/arch/x86/lib/checksum_32.S
21125@@ -28,7 +28,8 @@
21126 #include <linux/linkage.h>
21127 #include <asm/dwarf2.h>
21128 #include <asm/errno.h>
21129-
21130+#include <asm/segment.h>
21131+
21132 /*
21133 * computes a partial checksum, e.g. for TCP/UDP fragments
21134 */
21135@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21136
21137 #define ARGBASE 16
21138 #define FP 12
21139-
21140-ENTRY(csum_partial_copy_generic)
21141+
21142+ENTRY(csum_partial_copy_generic_to_user)
21143 CFI_STARTPROC
21144+
21145+#ifdef CONFIG_PAX_MEMORY_UDEREF
21146+ pushl_cfi %gs
21147+ popl_cfi %es
21148+ jmp csum_partial_copy_generic
21149+#endif
21150+
21151+ENTRY(csum_partial_copy_generic_from_user)
21152+
21153+#ifdef CONFIG_PAX_MEMORY_UDEREF
21154+ pushl_cfi %gs
21155+ popl_cfi %ds
21156+#endif
21157+
21158+ENTRY(csum_partial_copy_generic)
21159 subl $4,%esp
21160 CFI_ADJUST_CFA_OFFSET 4
21161 pushl_cfi %edi
21162@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
21163 jmp 4f
21164 SRC(1: movw (%esi), %bx )
21165 addl $2, %esi
21166-DST( movw %bx, (%edi) )
21167+DST( movw %bx, %es:(%edi) )
21168 addl $2, %edi
21169 addw %bx, %ax
21170 adcl $0, %eax
21171@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
21172 SRC(1: movl (%esi), %ebx )
21173 SRC( movl 4(%esi), %edx )
21174 adcl %ebx, %eax
21175-DST( movl %ebx, (%edi) )
21176+DST( movl %ebx, %es:(%edi) )
21177 adcl %edx, %eax
21178-DST( movl %edx, 4(%edi) )
21179+DST( movl %edx, %es:4(%edi) )
21180
21181 SRC( movl 8(%esi), %ebx )
21182 SRC( movl 12(%esi), %edx )
21183 adcl %ebx, %eax
21184-DST( movl %ebx, 8(%edi) )
21185+DST( movl %ebx, %es:8(%edi) )
21186 adcl %edx, %eax
21187-DST( movl %edx, 12(%edi) )
21188+DST( movl %edx, %es:12(%edi) )
21189
21190 SRC( movl 16(%esi), %ebx )
21191 SRC( movl 20(%esi), %edx )
21192 adcl %ebx, %eax
21193-DST( movl %ebx, 16(%edi) )
21194+DST( movl %ebx, %es:16(%edi) )
21195 adcl %edx, %eax
21196-DST( movl %edx, 20(%edi) )
21197+DST( movl %edx, %es:20(%edi) )
21198
21199 SRC( movl 24(%esi), %ebx )
21200 SRC( movl 28(%esi), %edx )
21201 adcl %ebx, %eax
21202-DST( movl %ebx, 24(%edi) )
21203+DST( movl %ebx, %es:24(%edi) )
21204 adcl %edx, %eax
21205-DST( movl %edx, 28(%edi) )
21206+DST( movl %edx, %es:28(%edi) )
21207
21208 lea 32(%esi), %esi
21209 lea 32(%edi), %edi
21210@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
21211 shrl $2, %edx # This clears CF
21212 SRC(3: movl (%esi), %ebx )
21213 adcl %ebx, %eax
21214-DST( movl %ebx, (%edi) )
21215+DST( movl %ebx, %es:(%edi) )
21216 lea 4(%esi), %esi
21217 lea 4(%edi), %edi
21218 dec %edx
21219@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
21220 jb 5f
21221 SRC( movw (%esi), %cx )
21222 leal 2(%esi), %esi
21223-DST( movw %cx, (%edi) )
21224+DST( movw %cx, %es:(%edi) )
21225 leal 2(%edi), %edi
21226 je 6f
21227 shll $16,%ecx
21228 SRC(5: movb (%esi), %cl )
21229-DST( movb %cl, (%edi) )
21230+DST( movb %cl, %es:(%edi) )
21231 6: addl %ecx, %eax
21232 adcl $0, %eax
21233 7:
21234@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
21235
21236 6001:
21237 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21238- movl $-EFAULT, (%ebx)
21239+ movl $-EFAULT, %ss:(%ebx)
21240
21241 # zero the complete destination - computing the rest
21242 # is too much work
21243@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
21244
21245 6002:
21246 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21247- movl $-EFAULT,(%ebx)
21248+ movl $-EFAULT,%ss:(%ebx)
21249 jmp 5000b
21250
21251 .previous
21252
21253+ pushl_cfi %ss
21254+ popl_cfi %ds
21255+ pushl_cfi %ss
21256+ popl_cfi %es
21257 popl_cfi %ebx
21258 CFI_RESTORE ebx
21259 popl_cfi %esi
21260@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
21261 popl_cfi %ecx # equivalent to addl $4,%esp
21262 ret
21263 CFI_ENDPROC
21264-ENDPROC(csum_partial_copy_generic)
21265+ENDPROC(csum_partial_copy_generic_to_user)
21266
21267 #else
21268
21269 /* Version for PentiumII/PPro */
21270
21271 #define ROUND1(x) \
21272+ nop; nop; nop; \
21273 SRC(movl x(%esi), %ebx ) ; \
21274 addl %ebx, %eax ; \
21275- DST(movl %ebx, x(%edi) ) ;
21276+ DST(movl %ebx, %es:x(%edi)) ;
21277
21278 #define ROUND(x) \
21279+ nop; nop; nop; \
21280 SRC(movl x(%esi), %ebx ) ; \
21281 adcl %ebx, %eax ; \
21282- DST(movl %ebx, x(%edi) ) ;
21283+ DST(movl %ebx, %es:x(%edi)) ;
21284
21285 #define ARGBASE 12
21286-
21287-ENTRY(csum_partial_copy_generic)
21288+
21289+ENTRY(csum_partial_copy_generic_to_user)
21290 CFI_STARTPROC
21291+
21292+#ifdef CONFIG_PAX_MEMORY_UDEREF
21293+ pushl_cfi %gs
21294+ popl_cfi %es
21295+ jmp csum_partial_copy_generic
21296+#endif
21297+
21298+ENTRY(csum_partial_copy_generic_from_user)
21299+
21300+#ifdef CONFIG_PAX_MEMORY_UDEREF
21301+ pushl_cfi %gs
21302+ popl_cfi %ds
21303+#endif
21304+
21305+ENTRY(csum_partial_copy_generic)
21306 pushl_cfi %ebx
21307 CFI_REL_OFFSET ebx, 0
21308 pushl_cfi %edi
21309@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
21310 subl %ebx, %edi
21311 lea -1(%esi),%edx
21312 andl $-32,%edx
21313- lea 3f(%ebx,%ebx), %ebx
21314+ lea 3f(%ebx,%ebx,2), %ebx
21315 testl %esi, %esi
21316 jmp *%ebx
21317 1: addl $64,%esi
21318@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
21319 jb 5f
21320 SRC( movw (%esi), %dx )
21321 leal 2(%esi), %esi
21322-DST( movw %dx, (%edi) )
21323+DST( movw %dx, %es:(%edi) )
21324 leal 2(%edi), %edi
21325 je 6f
21326 shll $16,%edx
21327 5:
21328 SRC( movb (%esi), %dl )
21329-DST( movb %dl, (%edi) )
21330+DST( movb %dl, %es:(%edi) )
21331 6: addl %edx, %eax
21332 adcl $0, %eax
21333 7:
21334 .section .fixup, "ax"
21335 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21336- movl $-EFAULT, (%ebx)
21337+ movl $-EFAULT, %ss:(%ebx)
21338 # zero the complete destination (computing the rest is too much work)
21339 movl ARGBASE+8(%esp),%edi # dst
21340 movl ARGBASE+12(%esp),%ecx # len
21341@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
21342 rep; stosb
21343 jmp 7b
21344 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21345- movl $-EFAULT, (%ebx)
21346+ movl $-EFAULT, %ss:(%ebx)
21347 jmp 7b
21348 .previous
21349
21350+#ifdef CONFIG_PAX_MEMORY_UDEREF
21351+ pushl_cfi %ss
21352+ popl_cfi %ds
21353+ pushl_cfi %ss
21354+ popl_cfi %es
21355+#endif
21356+
21357 popl_cfi %esi
21358 CFI_RESTORE esi
21359 popl_cfi %edi
21360@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
21361 CFI_RESTORE ebx
21362 ret
21363 CFI_ENDPROC
21364-ENDPROC(csum_partial_copy_generic)
21365+ENDPROC(csum_partial_copy_generic_to_user)
21366
21367 #undef ROUND
21368 #undef ROUND1
21369diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21370index f2145cf..cea889d 100644
21371--- a/arch/x86/lib/clear_page_64.S
21372+++ b/arch/x86/lib/clear_page_64.S
21373@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
21374 movl $4096/8,%ecx
21375 xorl %eax,%eax
21376 rep stosq
21377+ pax_force_retaddr
21378 ret
21379 CFI_ENDPROC
21380 ENDPROC(clear_page_c)
21381@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
21382 movl $4096,%ecx
21383 xorl %eax,%eax
21384 rep stosb
21385+ pax_force_retaddr
21386 ret
21387 CFI_ENDPROC
21388 ENDPROC(clear_page_c_e)
21389@@ -43,6 +45,7 @@ ENTRY(clear_page)
21390 leaq 64(%rdi),%rdi
21391 jnz .Lloop
21392 nop
21393+ pax_force_retaddr
21394 ret
21395 CFI_ENDPROC
21396 .Lclear_page_end:
21397@@ -58,7 +61,7 @@ ENDPROC(clear_page)
21398
21399 #include <asm/cpufeature.h>
21400
21401- .section .altinstr_replacement,"ax"
21402+ .section .altinstr_replacement,"a"
21403 1: .byte 0xeb /* jmp <disp8> */
21404 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21405 2: .byte 0xeb /* jmp <disp8> */
21406diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
21407index 1e572c5..2a162cd 100644
21408--- a/arch/x86/lib/cmpxchg16b_emu.S
21409+++ b/arch/x86/lib/cmpxchg16b_emu.S
21410@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
21411
21412 popf
21413 mov $1, %al
21414+ pax_force_retaddr
21415 ret
21416
21417 not_same:
21418 popf
21419 xor %al,%al
21420+ pax_force_retaddr
21421 ret
21422
21423 CFI_ENDPROC
21424diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21425index 6b34d04..dccb07f 100644
21426--- a/arch/x86/lib/copy_page_64.S
21427+++ b/arch/x86/lib/copy_page_64.S
21428@@ -9,6 +9,7 @@ copy_page_c:
21429 CFI_STARTPROC
21430 movl $4096/8,%ecx
21431 rep movsq
21432+ pax_force_retaddr
21433 ret
21434 CFI_ENDPROC
21435 ENDPROC(copy_page_c)
21436@@ -20,12 +21,14 @@ ENDPROC(copy_page_c)
21437
21438 ENTRY(copy_page)
21439 CFI_STARTPROC
21440- subq $2*8,%rsp
21441- CFI_ADJUST_CFA_OFFSET 2*8
21442+ subq $3*8,%rsp
21443+ CFI_ADJUST_CFA_OFFSET 3*8
21444 movq %rbx,(%rsp)
21445 CFI_REL_OFFSET rbx, 0
21446 movq %r12,1*8(%rsp)
21447 CFI_REL_OFFSET r12, 1*8
21448+ movq %r13,2*8(%rsp)
21449+ CFI_REL_OFFSET r13, 2*8
21450
21451 movl $(4096/64)-5,%ecx
21452 .p2align 4
21453@@ -37,7 +40,7 @@ ENTRY(copy_page)
21454 movq 16 (%rsi), %rdx
21455 movq 24 (%rsi), %r8
21456 movq 32 (%rsi), %r9
21457- movq 40 (%rsi), %r10
21458+ movq 40 (%rsi), %r13
21459 movq 48 (%rsi), %r11
21460 movq 56 (%rsi), %r12
21461
21462@@ -48,7 +51,7 @@ ENTRY(copy_page)
21463 movq %rdx, 16 (%rdi)
21464 movq %r8, 24 (%rdi)
21465 movq %r9, 32 (%rdi)
21466- movq %r10, 40 (%rdi)
21467+ movq %r13, 40 (%rdi)
21468 movq %r11, 48 (%rdi)
21469 movq %r12, 56 (%rdi)
21470
21471@@ -67,7 +70,7 @@ ENTRY(copy_page)
21472 movq 16 (%rsi), %rdx
21473 movq 24 (%rsi), %r8
21474 movq 32 (%rsi), %r9
21475- movq 40 (%rsi), %r10
21476+ movq 40 (%rsi), %r13
21477 movq 48 (%rsi), %r11
21478 movq 56 (%rsi), %r12
21479
21480@@ -76,7 +79,7 @@ ENTRY(copy_page)
21481 movq %rdx, 16 (%rdi)
21482 movq %r8, 24 (%rdi)
21483 movq %r9, 32 (%rdi)
21484- movq %r10, 40 (%rdi)
21485+ movq %r13, 40 (%rdi)
21486 movq %r11, 48 (%rdi)
21487 movq %r12, 56 (%rdi)
21488
21489@@ -89,8 +92,11 @@ ENTRY(copy_page)
21490 CFI_RESTORE rbx
21491 movq 1*8(%rsp),%r12
21492 CFI_RESTORE r12
21493- addq $2*8,%rsp
21494- CFI_ADJUST_CFA_OFFSET -2*8
21495+ movq 2*8(%rsp),%r13
21496+ CFI_RESTORE r13
21497+ addq $3*8,%rsp
21498+ CFI_ADJUST_CFA_OFFSET -3*8
21499+ pax_force_retaddr
21500 ret
21501 .Lcopy_page_end:
21502 CFI_ENDPROC
21503@@ -101,7 +107,7 @@ ENDPROC(copy_page)
21504
21505 #include <asm/cpufeature.h>
21506
21507- .section .altinstr_replacement,"ax"
21508+ .section .altinstr_replacement,"a"
21509 1: .byte 0xeb /* jmp <disp8> */
21510 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21511 2:
21512diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21513index 0248402..821c786 100644
21514--- a/arch/x86/lib/copy_user_64.S
21515+++ b/arch/x86/lib/copy_user_64.S
21516@@ -16,6 +16,7 @@
21517 #include <asm/thread_info.h>
21518 #include <asm/cpufeature.h>
21519 #include <asm/alternative-asm.h>
21520+#include <asm/pgtable.h>
21521
21522 /*
21523 * By placing feature2 after feature1 in altinstructions section, we logically
21524@@ -29,7 +30,7 @@
21525 .byte 0xe9 /* 32bit jump */
21526 .long \orig-1f /* by default jump to orig */
21527 1:
21528- .section .altinstr_replacement,"ax"
21529+ .section .altinstr_replacement,"a"
21530 2: .byte 0xe9 /* near jump with 32bit immediate */
21531 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
21532 3: .byte 0xe9 /* near jump with 32bit immediate */
21533@@ -71,47 +72,20 @@
21534 #endif
21535 .endm
21536
21537-/* Standard copy_to_user with segment limit checking */
21538-ENTRY(_copy_to_user)
21539- CFI_STARTPROC
21540- GET_THREAD_INFO(%rax)
21541- movq %rdi,%rcx
21542- addq %rdx,%rcx
21543- jc bad_to_user
21544- cmpq TI_addr_limit(%rax),%rcx
21545- ja bad_to_user
21546- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21547- copy_user_generic_unrolled,copy_user_generic_string, \
21548- copy_user_enhanced_fast_string
21549- CFI_ENDPROC
21550-ENDPROC(_copy_to_user)
21551-
21552-/* Standard copy_from_user with segment limit checking */
21553-ENTRY(_copy_from_user)
21554- CFI_STARTPROC
21555- GET_THREAD_INFO(%rax)
21556- movq %rsi,%rcx
21557- addq %rdx,%rcx
21558- jc bad_from_user
21559- cmpq TI_addr_limit(%rax),%rcx
21560- ja bad_from_user
21561- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21562- copy_user_generic_unrolled,copy_user_generic_string, \
21563- copy_user_enhanced_fast_string
21564- CFI_ENDPROC
21565-ENDPROC(_copy_from_user)
21566-
21567 .section .fixup,"ax"
21568 /* must zero dest */
21569 ENTRY(bad_from_user)
21570 bad_from_user:
21571 CFI_STARTPROC
21572+ testl %edx,%edx
21573+ js bad_to_user
21574 movl %edx,%ecx
21575 xorl %eax,%eax
21576 rep
21577 stosb
21578 bad_to_user:
21579 movl %edx,%eax
21580+ pax_force_retaddr
21581 ret
21582 CFI_ENDPROC
21583 ENDPROC(bad_from_user)
21584@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
21585 jz 17f
21586 1: movq (%rsi),%r8
21587 2: movq 1*8(%rsi),%r9
21588-3: movq 2*8(%rsi),%r10
21589+3: movq 2*8(%rsi),%rax
21590 4: movq 3*8(%rsi),%r11
21591 5: movq %r8,(%rdi)
21592 6: movq %r9,1*8(%rdi)
21593-7: movq %r10,2*8(%rdi)
21594+7: movq %rax,2*8(%rdi)
21595 8: movq %r11,3*8(%rdi)
21596 9: movq 4*8(%rsi),%r8
21597 10: movq 5*8(%rsi),%r9
21598-11: movq 6*8(%rsi),%r10
21599+11: movq 6*8(%rsi),%rax
21600 12: movq 7*8(%rsi),%r11
21601 13: movq %r8,4*8(%rdi)
21602 14: movq %r9,5*8(%rdi)
21603-15: movq %r10,6*8(%rdi)
21604+15: movq %rax,6*8(%rdi)
21605 16: movq %r11,7*8(%rdi)
21606 leaq 64(%rsi),%rsi
21607 leaq 64(%rdi),%rdi
21608@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
21609 decl %ecx
21610 jnz 21b
21611 23: xor %eax,%eax
21612+ pax_force_retaddr
21613 ret
21614
21615 .section .fixup,"ax"
21616@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
21617 3: rep
21618 movsb
21619 4: xorl %eax,%eax
21620+ pax_force_retaddr
21621 ret
21622
21623 .section .fixup,"ax"
21624@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
21625 1: rep
21626 movsb
21627 2: xorl %eax,%eax
21628+ pax_force_retaddr
21629 ret
21630
21631 .section .fixup,"ax"
21632diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
21633index cb0c112..e3a6895 100644
21634--- a/arch/x86/lib/copy_user_nocache_64.S
21635+++ b/arch/x86/lib/copy_user_nocache_64.S
21636@@ -8,12 +8,14 @@
21637
21638 #include <linux/linkage.h>
21639 #include <asm/dwarf2.h>
21640+#include <asm/alternative-asm.h>
21641
21642 #define FIX_ALIGNMENT 1
21643
21644 #include <asm/current.h>
21645 #include <asm/asm-offsets.h>
21646 #include <asm/thread_info.h>
21647+#include <asm/pgtable.h>
21648
21649 .macro ALIGN_DESTINATION
21650 #ifdef FIX_ALIGNMENT
21651@@ -50,6 +52,15 @@
21652 */
21653 ENTRY(__copy_user_nocache)
21654 CFI_STARTPROC
21655+
21656+#ifdef CONFIG_PAX_MEMORY_UDEREF
21657+ mov $PAX_USER_SHADOW_BASE,%rcx
21658+ cmp %rcx,%rsi
21659+ jae 1f
21660+ add %rcx,%rsi
21661+1:
21662+#endif
21663+
21664 cmpl $8,%edx
21665 jb 20f /* less then 8 bytes, go to byte copy loop */
21666 ALIGN_DESTINATION
21667@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
21668 jz 17f
21669 1: movq (%rsi),%r8
21670 2: movq 1*8(%rsi),%r9
21671-3: movq 2*8(%rsi),%r10
21672+3: movq 2*8(%rsi),%rax
21673 4: movq 3*8(%rsi),%r11
21674 5: movnti %r8,(%rdi)
21675 6: movnti %r9,1*8(%rdi)
21676-7: movnti %r10,2*8(%rdi)
21677+7: movnti %rax,2*8(%rdi)
21678 8: movnti %r11,3*8(%rdi)
21679 9: movq 4*8(%rsi),%r8
21680 10: movq 5*8(%rsi),%r9
21681-11: movq 6*8(%rsi),%r10
21682+11: movq 6*8(%rsi),%rax
21683 12: movq 7*8(%rsi),%r11
21684 13: movnti %r8,4*8(%rdi)
21685 14: movnti %r9,5*8(%rdi)
21686-15: movnti %r10,6*8(%rdi)
21687+15: movnti %rax,6*8(%rdi)
21688 16: movnti %r11,7*8(%rdi)
21689 leaq 64(%rsi),%rsi
21690 leaq 64(%rdi),%rdi
21691@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
21692 jnz 21b
21693 23: xorl %eax,%eax
21694 sfence
21695+ pax_force_retaddr
21696 ret
21697
21698 .section .fixup,"ax"
21699diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
21700index fb903b7..c92b7f7 100644
21701--- a/arch/x86/lib/csum-copy_64.S
21702+++ b/arch/x86/lib/csum-copy_64.S
21703@@ -8,6 +8,7 @@
21704 #include <linux/linkage.h>
21705 #include <asm/dwarf2.h>
21706 #include <asm/errno.h>
21707+#include <asm/alternative-asm.h>
21708
21709 /*
21710 * Checksum copy with exception handling.
21711@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
21712 CFI_RESTORE rbp
21713 addq $7*8, %rsp
21714 CFI_ADJUST_CFA_OFFSET -7*8
21715+ pax_force_retaddr 0, 1
21716 ret
21717 CFI_RESTORE_STATE
21718
21719diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
21720index 459b58a..9570bc7 100644
21721--- a/arch/x86/lib/csum-wrappers_64.c
21722+++ b/arch/x86/lib/csum-wrappers_64.c
21723@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
21724 len -= 2;
21725 }
21726 }
21727- isum = csum_partial_copy_generic((__force const void *)src,
21728+
21729+#ifdef CONFIG_PAX_MEMORY_UDEREF
21730+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21731+ src += PAX_USER_SHADOW_BASE;
21732+#endif
21733+
21734+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
21735 dst, len, isum, errp, NULL);
21736 if (unlikely(*errp))
21737 goto out_err;
21738@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
21739 }
21740
21741 *errp = 0;
21742- return csum_partial_copy_generic(src, (void __force *)dst,
21743+
21744+#ifdef CONFIG_PAX_MEMORY_UDEREF
21745+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
21746+ dst += PAX_USER_SHADOW_BASE;
21747+#endif
21748+
21749+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
21750 len, isum, NULL, errp);
21751 }
21752 EXPORT_SYMBOL(csum_partial_copy_to_user);
21753diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
21754index 51f1504..ddac4c1 100644
21755--- a/arch/x86/lib/getuser.S
21756+++ b/arch/x86/lib/getuser.S
21757@@ -33,15 +33,38 @@
21758 #include <asm/asm-offsets.h>
21759 #include <asm/thread_info.h>
21760 #include <asm/asm.h>
21761+#include <asm/segment.h>
21762+#include <asm/pgtable.h>
21763+#include <asm/alternative-asm.h>
21764+
21765+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21766+#define __copyuser_seg gs;
21767+#else
21768+#define __copyuser_seg
21769+#endif
21770
21771 .text
21772 ENTRY(__get_user_1)
21773 CFI_STARTPROC
21774+
21775+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21776 GET_THREAD_INFO(%_ASM_DX)
21777 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21778 jae bad_get_user
21779-1: movzb (%_ASM_AX),%edx
21780+
21781+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21782+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21783+ cmp %_ASM_DX,%_ASM_AX
21784+ jae 1234f
21785+ add %_ASM_DX,%_ASM_AX
21786+1234:
21787+#endif
21788+
21789+#endif
21790+
21791+1: __copyuser_seg movzb (%_ASM_AX),%edx
21792 xor %eax,%eax
21793+ pax_force_retaddr
21794 ret
21795 CFI_ENDPROC
21796 ENDPROC(__get_user_1)
21797@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
21798 ENTRY(__get_user_2)
21799 CFI_STARTPROC
21800 add $1,%_ASM_AX
21801+
21802+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21803 jc bad_get_user
21804 GET_THREAD_INFO(%_ASM_DX)
21805 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21806 jae bad_get_user
21807-2: movzwl -1(%_ASM_AX),%edx
21808+
21809+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21810+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21811+ cmp %_ASM_DX,%_ASM_AX
21812+ jae 1234f
21813+ add %_ASM_DX,%_ASM_AX
21814+1234:
21815+#endif
21816+
21817+#endif
21818+
21819+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
21820 xor %eax,%eax
21821+ pax_force_retaddr
21822 ret
21823 CFI_ENDPROC
21824 ENDPROC(__get_user_2)
21825@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
21826 ENTRY(__get_user_4)
21827 CFI_STARTPROC
21828 add $3,%_ASM_AX
21829+
21830+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21831 jc bad_get_user
21832 GET_THREAD_INFO(%_ASM_DX)
21833 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21834 jae bad_get_user
21835-3: mov -3(%_ASM_AX),%edx
21836+
21837+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21838+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21839+ cmp %_ASM_DX,%_ASM_AX
21840+ jae 1234f
21841+ add %_ASM_DX,%_ASM_AX
21842+1234:
21843+#endif
21844+
21845+#endif
21846+
21847+3: __copyuser_seg mov -3(%_ASM_AX),%edx
21848 xor %eax,%eax
21849+ pax_force_retaddr
21850 ret
21851 CFI_ENDPROC
21852 ENDPROC(__get_user_4)
21853@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
21854 GET_THREAD_INFO(%_ASM_DX)
21855 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21856 jae bad_get_user
21857+
21858+#ifdef CONFIG_PAX_MEMORY_UDEREF
21859+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21860+ cmp %_ASM_DX,%_ASM_AX
21861+ jae 1234f
21862+ add %_ASM_DX,%_ASM_AX
21863+1234:
21864+#endif
21865+
21866 4: movq -7(%_ASM_AX),%_ASM_DX
21867 xor %eax,%eax
21868+ pax_force_retaddr
21869 ret
21870 CFI_ENDPROC
21871 ENDPROC(__get_user_8)
21872@@ -91,6 +152,7 @@ bad_get_user:
21873 CFI_STARTPROC
21874 xor %edx,%edx
21875 mov $(-EFAULT),%_ASM_AX
21876+ pax_force_retaddr
21877 ret
21878 CFI_ENDPROC
21879 END(bad_get_user)
21880diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
21881index b1e6c4b..21ae8fc 100644
21882--- a/arch/x86/lib/insn.c
21883+++ b/arch/x86/lib/insn.c
21884@@ -21,6 +21,11 @@
21885 #include <linux/string.h>
21886 #include <asm/inat.h>
21887 #include <asm/insn.h>
21888+#ifdef __KERNEL__
21889+#include <asm/pgtable_types.h>
21890+#else
21891+#define ktla_ktva(addr) addr
21892+#endif
21893
21894 /* Verify next sizeof(t) bytes can be on the same instruction */
21895 #define validate_next(t, insn, n) \
21896@@ -49,8 +54,8 @@
21897 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
21898 {
21899 memset(insn, 0, sizeof(*insn));
21900- insn->kaddr = kaddr;
21901- insn->next_byte = kaddr;
21902+ insn->kaddr = ktla_ktva(kaddr);
21903+ insn->next_byte = ktla_ktva(kaddr);
21904 insn->x86_64 = x86_64 ? 1 : 0;
21905 insn->opnd_bytes = 4;
21906 if (x86_64)
21907diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
21908index 05a95e7..326f2fa 100644
21909--- a/arch/x86/lib/iomap_copy_64.S
21910+++ b/arch/x86/lib/iomap_copy_64.S
21911@@ -17,6 +17,7 @@
21912
21913 #include <linux/linkage.h>
21914 #include <asm/dwarf2.h>
21915+#include <asm/alternative-asm.h>
21916
21917 /*
21918 * override generic version in lib/iomap_copy.c
21919@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
21920 CFI_STARTPROC
21921 movl %edx,%ecx
21922 rep movsd
21923+ pax_force_retaddr
21924 ret
21925 CFI_ENDPROC
21926 ENDPROC(__iowrite32_copy)
21927diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
21928index 1c273be..da9cc0e 100644
21929--- a/arch/x86/lib/memcpy_64.S
21930+++ b/arch/x86/lib/memcpy_64.S
21931@@ -33,6 +33,7 @@
21932 rep movsq
21933 movl %edx, %ecx
21934 rep movsb
21935+ pax_force_retaddr
21936 ret
21937 .Lmemcpy_e:
21938 .previous
21939@@ -49,6 +50,7 @@
21940 movq %rdi, %rax
21941 movq %rdx, %rcx
21942 rep movsb
21943+ pax_force_retaddr
21944 ret
21945 .Lmemcpy_e_e:
21946 .previous
21947@@ -76,13 +78,13 @@ ENTRY(memcpy)
21948 */
21949 movq 0*8(%rsi), %r8
21950 movq 1*8(%rsi), %r9
21951- movq 2*8(%rsi), %r10
21952+ movq 2*8(%rsi), %rcx
21953 movq 3*8(%rsi), %r11
21954 leaq 4*8(%rsi), %rsi
21955
21956 movq %r8, 0*8(%rdi)
21957 movq %r9, 1*8(%rdi)
21958- movq %r10, 2*8(%rdi)
21959+ movq %rcx, 2*8(%rdi)
21960 movq %r11, 3*8(%rdi)
21961 leaq 4*8(%rdi), %rdi
21962 jae .Lcopy_forward_loop
21963@@ -105,12 +107,12 @@ ENTRY(memcpy)
21964 subq $0x20, %rdx
21965 movq -1*8(%rsi), %r8
21966 movq -2*8(%rsi), %r9
21967- movq -3*8(%rsi), %r10
21968+ movq -3*8(%rsi), %rcx
21969 movq -4*8(%rsi), %r11
21970 leaq -4*8(%rsi), %rsi
21971 movq %r8, -1*8(%rdi)
21972 movq %r9, -2*8(%rdi)
21973- movq %r10, -3*8(%rdi)
21974+ movq %rcx, -3*8(%rdi)
21975 movq %r11, -4*8(%rdi)
21976 leaq -4*8(%rdi), %rdi
21977 jae .Lcopy_backward_loop
21978@@ -130,12 +132,13 @@ ENTRY(memcpy)
21979 */
21980 movq 0*8(%rsi), %r8
21981 movq 1*8(%rsi), %r9
21982- movq -2*8(%rsi, %rdx), %r10
21983+ movq -2*8(%rsi, %rdx), %rcx
21984 movq -1*8(%rsi, %rdx), %r11
21985 movq %r8, 0*8(%rdi)
21986 movq %r9, 1*8(%rdi)
21987- movq %r10, -2*8(%rdi, %rdx)
21988+ movq %rcx, -2*8(%rdi, %rdx)
21989 movq %r11, -1*8(%rdi, %rdx)
21990+ pax_force_retaddr
21991 retq
21992 .p2align 4
21993 .Lless_16bytes:
21994@@ -148,6 +151,7 @@ ENTRY(memcpy)
21995 movq -1*8(%rsi, %rdx), %r9
21996 movq %r8, 0*8(%rdi)
21997 movq %r9, -1*8(%rdi, %rdx)
21998+ pax_force_retaddr
21999 retq
22000 .p2align 4
22001 .Lless_8bytes:
22002@@ -161,6 +165,7 @@ ENTRY(memcpy)
22003 movl -4(%rsi, %rdx), %r8d
22004 movl %ecx, (%rdi)
22005 movl %r8d, -4(%rdi, %rdx)
22006+ pax_force_retaddr
22007 retq
22008 .p2align 4
22009 .Lless_3bytes:
22010@@ -179,6 +184,7 @@ ENTRY(memcpy)
22011 movb %cl, (%rdi)
22012
22013 .Lend:
22014+ pax_force_retaddr
22015 retq
22016 CFI_ENDPROC
22017 ENDPROC(memcpy)
22018diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
22019index ee16461..c39c199 100644
22020--- a/arch/x86/lib/memmove_64.S
22021+++ b/arch/x86/lib/memmove_64.S
22022@@ -61,13 +61,13 @@ ENTRY(memmove)
22023 5:
22024 sub $0x20, %rdx
22025 movq 0*8(%rsi), %r11
22026- movq 1*8(%rsi), %r10
22027+ movq 1*8(%rsi), %rcx
22028 movq 2*8(%rsi), %r9
22029 movq 3*8(%rsi), %r8
22030 leaq 4*8(%rsi), %rsi
22031
22032 movq %r11, 0*8(%rdi)
22033- movq %r10, 1*8(%rdi)
22034+ movq %rcx, 1*8(%rdi)
22035 movq %r9, 2*8(%rdi)
22036 movq %r8, 3*8(%rdi)
22037 leaq 4*8(%rdi), %rdi
22038@@ -81,10 +81,10 @@ ENTRY(memmove)
22039 4:
22040 movq %rdx, %rcx
22041 movq -8(%rsi, %rdx), %r11
22042- lea -8(%rdi, %rdx), %r10
22043+ lea -8(%rdi, %rdx), %r9
22044 shrq $3, %rcx
22045 rep movsq
22046- movq %r11, (%r10)
22047+ movq %r11, (%r9)
22048 jmp 13f
22049 .Lmemmove_end_forward:
22050
22051@@ -95,14 +95,14 @@ ENTRY(memmove)
22052 7:
22053 movq %rdx, %rcx
22054 movq (%rsi), %r11
22055- movq %rdi, %r10
22056+ movq %rdi, %r9
22057 leaq -8(%rsi, %rdx), %rsi
22058 leaq -8(%rdi, %rdx), %rdi
22059 shrq $3, %rcx
22060 std
22061 rep movsq
22062 cld
22063- movq %r11, (%r10)
22064+ movq %r11, (%r9)
22065 jmp 13f
22066
22067 /*
22068@@ -127,13 +127,13 @@ ENTRY(memmove)
22069 8:
22070 subq $0x20, %rdx
22071 movq -1*8(%rsi), %r11
22072- movq -2*8(%rsi), %r10
22073+ movq -2*8(%rsi), %rcx
22074 movq -3*8(%rsi), %r9
22075 movq -4*8(%rsi), %r8
22076 leaq -4*8(%rsi), %rsi
22077
22078 movq %r11, -1*8(%rdi)
22079- movq %r10, -2*8(%rdi)
22080+ movq %rcx, -2*8(%rdi)
22081 movq %r9, -3*8(%rdi)
22082 movq %r8, -4*8(%rdi)
22083 leaq -4*8(%rdi), %rdi
22084@@ -151,11 +151,11 @@ ENTRY(memmove)
22085 * Move data from 16 bytes to 31 bytes.
22086 */
22087 movq 0*8(%rsi), %r11
22088- movq 1*8(%rsi), %r10
22089+ movq 1*8(%rsi), %rcx
22090 movq -2*8(%rsi, %rdx), %r9
22091 movq -1*8(%rsi, %rdx), %r8
22092 movq %r11, 0*8(%rdi)
22093- movq %r10, 1*8(%rdi)
22094+ movq %rcx, 1*8(%rdi)
22095 movq %r9, -2*8(%rdi, %rdx)
22096 movq %r8, -1*8(%rdi, %rdx)
22097 jmp 13f
22098@@ -167,9 +167,9 @@ ENTRY(memmove)
22099 * Move data from 8 bytes to 15 bytes.
22100 */
22101 movq 0*8(%rsi), %r11
22102- movq -1*8(%rsi, %rdx), %r10
22103+ movq -1*8(%rsi, %rdx), %r9
22104 movq %r11, 0*8(%rdi)
22105- movq %r10, -1*8(%rdi, %rdx)
22106+ movq %r9, -1*8(%rdi, %rdx)
22107 jmp 13f
22108 10:
22109 cmpq $4, %rdx
22110@@ -178,9 +178,9 @@ ENTRY(memmove)
22111 * Move data from 4 bytes to 7 bytes.
22112 */
22113 movl (%rsi), %r11d
22114- movl -4(%rsi, %rdx), %r10d
22115+ movl -4(%rsi, %rdx), %r9d
22116 movl %r11d, (%rdi)
22117- movl %r10d, -4(%rdi, %rdx)
22118+ movl %r9d, -4(%rdi, %rdx)
22119 jmp 13f
22120 11:
22121 cmp $2, %rdx
22122@@ -189,9 +189,9 @@ ENTRY(memmove)
22123 * Move data from 2 bytes to 3 bytes.
22124 */
22125 movw (%rsi), %r11w
22126- movw -2(%rsi, %rdx), %r10w
22127+ movw -2(%rsi, %rdx), %r9w
22128 movw %r11w, (%rdi)
22129- movw %r10w, -2(%rdi, %rdx)
22130+ movw %r9w, -2(%rdi, %rdx)
22131 jmp 13f
22132 12:
22133 cmp $1, %rdx
22134@@ -202,6 +202,7 @@ ENTRY(memmove)
22135 movb (%rsi), %r11b
22136 movb %r11b, (%rdi)
22137 13:
22138+ pax_force_retaddr
22139 retq
22140 CFI_ENDPROC
22141
22142@@ -210,6 +211,7 @@ ENTRY(memmove)
22143 /* Forward moving data. */
22144 movq %rdx, %rcx
22145 rep movsb
22146+ pax_force_retaddr
22147 retq
22148 .Lmemmove_end_forward_efs:
22149 .previous
22150diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22151index 2dcb380..963660a 100644
22152--- a/arch/x86/lib/memset_64.S
22153+++ b/arch/x86/lib/memset_64.S
22154@@ -30,6 +30,7 @@
22155 movl %edx,%ecx
22156 rep stosb
22157 movq %r9,%rax
22158+ pax_force_retaddr
22159 ret
22160 .Lmemset_e:
22161 .previous
22162@@ -52,6 +53,7 @@
22163 movq %rdx,%rcx
22164 rep stosb
22165 movq %r9,%rax
22166+ pax_force_retaddr
22167 ret
22168 .Lmemset_e_e:
22169 .previous
22170@@ -59,7 +61,7 @@
22171 ENTRY(memset)
22172 ENTRY(__memset)
22173 CFI_STARTPROC
22174- movq %rdi,%r10
22175+ movq %rdi,%r11
22176
22177 /* expand byte value */
22178 movzbl %sil,%ecx
22179@@ -117,7 +119,8 @@ ENTRY(__memset)
22180 jnz .Lloop_1
22181
22182 .Lende:
22183- movq %r10,%rax
22184+ movq %r11,%rax
22185+ pax_force_retaddr
22186 ret
22187
22188 CFI_RESTORE_STATE
22189diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22190index c9f2d9b..e7fd2c0 100644
22191--- a/arch/x86/lib/mmx_32.c
22192+++ b/arch/x86/lib/mmx_32.c
22193@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22194 {
22195 void *p;
22196 int i;
22197+ unsigned long cr0;
22198
22199 if (unlikely(in_interrupt()))
22200 return __memcpy(to, from, len);
22201@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22202 kernel_fpu_begin();
22203
22204 __asm__ __volatile__ (
22205- "1: prefetch (%0)\n" /* This set is 28 bytes */
22206- " prefetch 64(%0)\n"
22207- " prefetch 128(%0)\n"
22208- " prefetch 192(%0)\n"
22209- " prefetch 256(%0)\n"
22210+ "1: prefetch (%1)\n" /* This set is 28 bytes */
22211+ " prefetch 64(%1)\n"
22212+ " prefetch 128(%1)\n"
22213+ " prefetch 192(%1)\n"
22214+ " prefetch 256(%1)\n"
22215 "2: \n"
22216 ".section .fixup, \"ax\"\n"
22217- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22218+ "3: \n"
22219+
22220+#ifdef CONFIG_PAX_KERNEXEC
22221+ " movl %%cr0, %0\n"
22222+ " movl %0, %%eax\n"
22223+ " andl $0xFFFEFFFF, %%eax\n"
22224+ " movl %%eax, %%cr0\n"
22225+#endif
22226+
22227+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22228+
22229+#ifdef CONFIG_PAX_KERNEXEC
22230+ " movl %0, %%cr0\n"
22231+#endif
22232+
22233 " jmp 2b\n"
22234 ".previous\n"
22235 _ASM_EXTABLE(1b, 3b)
22236- : : "r" (from));
22237+ : "=&r" (cr0) : "r" (from) : "ax");
22238
22239 for ( ; i > 5; i--) {
22240 __asm__ __volatile__ (
22241- "1: prefetch 320(%0)\n"
22242- "2: movq (%0), %%mm0\n"
22243- " movq 8(%0), %%mm1\n"
22244- " movq 16(%0), %%mm2\n"
22245- " movq 24(%0), %%mm3\n"
22246- " movq %%mm0, (%1)\n"
22247- " movq %%mm1, 8(%1)\n"
22248- " movq %%mm2, 16(%1)\n"
22249- " movq %%mm3, 24(%1)\n"
22250- " movq 32(%0), %%mm0\n"
22251- " movq 40(%0), %%mm1\n"
22252- " movq 48(%0), %%mm2\n"
22253- " movq 56(%0), %%mm3\n"
22254- " movq %%mm0, 32(%1)\n"
22255- " movq %%mm1, 40(%1)\n"
22256- " movq %%mm2, 48(%1)\n"
22257- " movq %%mm3, 56(%1)\n"
22258+ "1: prefetch 320(%1)\n"
22259+ "2: movq (%1), %%mm0\n"
22260+ " movq 8(%1), %%mm1\n"
22261+ " movq 16(%1), %%mm2\n"
22262+ " movq 24(%1), %%mm3\n"
22263+ " movq %%mm0, (%2)\n"
22264+ " movq %%mm1, 8(%2)\n"
22265+ " movq %%mm2, 16(%2)\n"
22266+ " movq %%mm3, 24(%2)\n"
22267+ " movq 32(%1), %%mm0\n"
22268+ " movq 40(%1), %%mm1\n"
22269+ " movq 48(%1), %%mm2\n"
22270+ " movq 56(%1), %%mm3\n"
22271+ " movq %%mm0, 32(%2)\n"
22272+ " movq %%mm1, 40(%2)\n"
22273+ " movq %%mm2, 48(%2)\n"
22274+ " movq %%mm3, 56(%2)\n"
22275 ".section .fixup, \"ax\"\n"
22276- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22277+ "3:\n"
22278+
22279+#ifdef CONFIG_PAX_KERNEXEC
22280+ " movl %%cr0, %0\n"
22281+ " movl %0, %%eax\n"
22282+ " andl $0xFFFEFFFF, %%eax\n"
22283+ " movl %%eax, %%cr0\n"
22284+#endif
22285+
22286+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22287+
22288+#ifdef CONFIG_PAX_KERNEXEC
22289+ " movl %0, %%cr0\n"
22290+#endif
22291+
22292 " jmp 2b\n"
22293 ".previous\n"
22294 _ASM_EXTABLE(1b, 3b)
22295- : : "r" (from), "r" (to) : "memory");
22296+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22297
22298 from += 64;
22299 to += 64;
22300@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22301 static void fast_copy_page(void *to, void *from)
22302 {
22303 int i;
22304+ unsigned long cr0;
22305
22306 kernel_fpu_begin();
22307
22308@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22309 * but that is for later. -AV
22310 */
22311 __asm__ __volatile__(
22312- "1: prefetch (%0)\n"
22313- " prefetch 64(%0)\n"
22314- " prefetch 128(%0)\n"
22315- " prefetch 192(%0)\n"
22316- " prefetch 256(%0)\n"
22317+ "1: prefetch (%1)\n"
22318+ " prefetch 64(%1)\n"
22319+ " prefetch 128(%1)\n"
22320+ " prefetch 192(%1)\n"
22321+ " prefetch 256(%1)\n"
22322 "2: \n"
22323 ".section .fixup, \"ax\"\n"
22324- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22325+ "3: \n"
22326+
22327+#ifdef CONFIG_PAX_KERNEXEC
22328+ " movl %%cr0, %0\n"
22329+ " movl %0, %%eax\n"
22330+ " andl $0xFFFEFFFF, %%eax\n"
22331+ " movl %%eax, %%cr0\n"
22332+#endif
22333+
22334+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22335+
22336+#ifdef CONFIG_PAX_KERNEXEC
22337+ " movl %0, %%cr0\n"
22338+#endif
22339+
22340 " jmp 2b\n"
22341 ".previous\n"
22342- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22343+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22344
22345 for (i = 0; i < (4096-320)/64; i++) {
22346 __asm__ __volatile__ (
22347- "1: prefetch 320(%0)\n"
22348- "2: movq (%0), %%mm0\n"
22349- " movntq %%mm0, (%1)\n"
22350- " movq 8(%0), %%mm1\n"
22351- " movntq %%mm1, 8(%1)\n"
22352- " movq 16(%0), %%mm2\n"
22353- " movntq %%mm2, 16(%1)\n"
22354- " movq 24(%0), %%mm3\n"
22355- " movntq %%mm3, 24(%1)\n"
22356- " movq 32(%0), %%mm4\n"
22357- " movntq %%mm4, 32(%1)\n"
22358- " movq 40(%0), %%mm5\n"
22359- " movntq %%mm5, 40(%1)\n"
22360- " movq 48(%0), %%mm6\n"
22361- " movntq %%mm6, 48(%1)\n"
22362- " movq 56(%0), %%mm7\n"
22363- " movntq %%mm7, 56(%1)\n"
22364+ "1: prefetch 320(%1)\n"
22365+ "2: movq (%1), %%mm0\n"
22366+ " movntq %%mm0, (%2)\n"
22367+ " movq 8(%1), %%mm1\n"
22368+ " movntq %%mm1, 8(%2)\n"
22369+ " movq 16(%1), %%mm2\n"
22370+ " movntq %%mm2, 16(%2)\n"
22371+ " movq 24(%1), %%mm3\n"
22372+ " movntq %%mm3, 24(%2)\n"
22373+ " movq 32(%1), %%mm4\n"
22374+ " movntq %%mm4, 32(%2)\n"
22375+ " movq 40(%1), %%mm5\n"
22376+ " movntq %%mm5, 40(%2)\n"
22377+ " movq 48(%1), %%mm6\n"
22378+ " movntq %%mm6, 48(%2)\n"
22379+ " movq 56(%1), %%mm7\n"
22380+ " movntq %%mm7, 56(%2)\n"
22381 ".section .fixup, \"ax\"\n"
22382- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22383+ "3:\n"
22384+
22385+#ifdef CONFIG_PAX_KERNEXEC
22386+ " movl %%cr0, %0\n"
22387+ " movl %0, %%eax\n"
22388+ " andl $0xFFFEFFFF, %%eax\n"
22389+ " movl %%eax, %%cr0\n"
22390+#endif
22391+
22392+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22393+
22394+#ifdef CONFIG_PAX_KERNEXEC
22395+ " movl %0, %%cr0\n"
22396+#endif
22397+
22398 " jmp 2b\n"
22399 ".previous\n"
22400- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22401+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22402
22403 from += 64;
22404 to += 64;
22405@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22406 static void fast_copy_page(void *to, void *from)
22407 {
22408 int i;
22409+ unsigned long cr0;
22410
22411 kernel_fpu_begin();
22412
22413 __asm__ __volatile__ (
22414- "1: prefetch (%0)\n"
22415- " prefetch 64(%0)\n"
22416- " prefetch 128(%0)\n"
22417- " prefetch 192(%0)\n"
22418- " prefetch 256(%0)\n"
22419+ "1: prefetch (%1)\n"
22420+ " prefetch 64(%1)\n"
22421+ " prefetch 128(%1)\n"
22422+ " prefetch 192(%1)\n"
22423+ " prefetch 256(%1)\n"
22424 "2: \n"
22425 ".section .fixup, \"ax\"\n"
22426- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22427+ "3: \n"
22428+
22429+#ifdef CONFIG_PAX_KERNEXEC
22430+ " movl %%cr0, %0\n"
22431+ " movl %0, %%eax\n"
22432+ " andl $0xFFFEFFFF, %%eax\n"
22433+ " movl %%eax, %%cr0\n"
22434+#endif
22435+
22436+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22437+
22438+#ifdef CONFIG_PAX_KERNEXEC
22439+ " movl %0, %%cr0\n"
22440+#endif
22441+
22442 " jmp 2b\n"
22443 ".previous\n"
22444- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22445+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22446
22447 for (i = 0; i < 4096/64; i++) {
22448 __asm__ __volatile__ (
22449- "1: prefetch 320(%0)\n"
22450- "2: movq (%0), %%mm0\n"
22451- " movq 8(%0), %%mm1\n"
22452- " movq 16(%0), %%mm2\n"
22453- " movq 24(%0), %%mm3\n"
22454- " movq %%mm0, (%1)\n"
22455- " movq %%mm1, 8(%1)\n"
22456- " movq %%mm2, 16(%1)\n"
22457- " movq %%mm3, 24(%1)\n"
22458- " movq 32(%0), %%mm0\n"
22459- " movq 40(%0), %%mm1\n"
22460- " movq 48(%0), %%mm2\n"
22461- " movq 56(%0), %%mm3\n"
22462- " movq %%mm0, 32(%1)\n"
22463- " movq %%mm1, 40(%1)\n"
22464- " movq %%mm2, 48(%1)\n"
22465- " movq %%mm3, 56(%1)\n"
22466+ "1: prefetch 320(%1)\n"
22467+ "2: movq (%1), %%mm0\n"
22468+ " movq 8(%1), %%mm1\n"
22469+ " movq 16(%1), %%mm2\n"
22470+ " movq 24(%1), %%mm3\n"
22471+ " movq %%mm0, (%2)\n"
22472+ " movq %%mm1, 8(%2)\n"
22473+ " movq %%mm2, 16(%2)\n"
22474+ " movq %%mm3, 24(%2)\n"
22475+ " movq 32(%1), %%mm0\n"
22476+ " movq 40(%1), %%mm1\n"
22477+ " movq 48(%1), %%mm2\n"
22478+ " movq 56(%1), %%mm3\n"
22479+ " movq %%mm0, 32(%2)\n"
22480+ " movq %%mm1, 40(%2)\n"
22481+ " movq %%mm2, 48(%2)\n"
22482+ " movq %%mm3, 56(%2)\n"
22483 ".section .fixup, \"ax\"\n"
22484- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22485+ "3:\n"
22486+
22487+#ifdef CONFIG_PAX_KERNEXEC
22488+ " movl %%cr0, %0\n"
22489+ " movl %0, %%eax\n"
22490+ " andl $0xFFFEFFFF, %%eax\n"
22491+ " movl %%eax, %%cr0\n"
22492+#endif
22493+
22494+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22495+
22496+#ifdef CONFIG_PAX_KERNEXEC
22497+ " movl %0, %%cr0\n"
22498+#endif
22499+
22500 " jmp 2b\n"
22501 ".previous\n"
22502 _ASM_EXTABLE(1b, 3b)
22503- : : "r" (from), "r" (to) : "memory");
22504+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22505
22506 from += 64;
22507 to += 64;
22508diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22509index 69fa106..adda88b 100644
22510--- a/arch/x86/lib/msr-reg.S
22511+++ b/arch/x86/lib/msr-reg.S
22512@@ -3,6 +3,7 @@
22513 #include <asm/dwarf2.h>
22514 #include <asm/asm.h>
22515 #include <asm/msr.h>
22516+#include <asm/alternative-asm.h>
22517
22518 #ifdef CONFIG_X86_64
22519 /*
22520@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22521 CFI_STARTPROC
22522 pushq_cfi %rbx
22523 pushq_cfi %rbp
22524- movq %rdi, %r10 /* Save pointer */
22525+ movq %rdi, %r9 /* Save pointer */
22526 xorl %r11d, %r11d /* Return value */
22527 movl (%rdi), %eax
22528 movl 4(%rdi), %ecx
22529@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22530 movl 28(%rdi), %edi
22531 CFI_REMEMBER_STATE
22532 1: \op
22533-2: movl %eax, (%r10)
22534+2: movl %eax, (%r9)
22535 movl %r11d, %eax /* Return value */
22536- movl %ecx, 4(%r10)
22537- movl %edx, 8(%r10)
22538- movl %ebx, 12(%r10)
22539- movl %ebp, 20(%r10)
22540- movl %esi, 24(%r10)
22541- movl %edi, 28(%r10)
22542+ movl %ecx, 4(%r9)
22543+ movl %edx, 8(%r9)
22544+ movl %ebx, 12(%r9)
22545+ movl %ebp, 20(%r9)
22546+ movl %esi, 24(%r9)
22547+ movl %edi, 28(%r9)
22548 popq_cfi %rbp
22549 popq_cfi %rbx
22550+ pax_force_retaddr
22551 ret
22552 3:
22553 CFI_RESTORE_STATE
22554diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22555index 36b0d15..d381858 100644
22556--- a/arch/x86/lib/putuser.S
22557+++ b/arch/x86/lib/putuser.S
22558@@ -15,7 +15,9 @@
22559 #include <asm/thread_info.h>
22560 #include <asm/errno.h>
22561 #include <asm/asm.h>
22562-
22563+#include <asm/segment.h>
22564+#include <asm/pgtable.h>
22565+#include <asm/alternative-asm.h>
22566
22567 /*
22568 * __put_user_X
22569@@ -29,52 +31,119 @@
22570 * as they get called from within inline assembly.
22571 */
22572
22573-#define ENTER CFI_STARTPROC ; \
22574- GET_THREAD_INFO(%_ASM_BX)
22575-#define EXIT ret ; \
22576+#define ENTER CFI_STARTPROC
22577+#define EXIT pax_force_retaddr; ret ; \
22578 CFI_ENDPROC
22579
22580+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22581+#define _DEST %_ASM_CX,%_ASM_BX
22582+#else
22583+#define _DEST %_ASM_CX
22584+#endif
22585+
22586+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22587+#define __copyuser_seg gs;
22588+#else
22589+#define __copyuser_seg
22590+#endif
22591+
22592 .text
22593 ENTRY(__put_user_1)
22594 ENTER
22595+
22596+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22597+ GET_THREAD_INFO(%_ASM_BX)
22598 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22599 jae bad_put_user
22600-1: movb %al,(%_ASM_CX)
22601+
22602+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22603+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22604+ cmp %_ASM_BX,%_ASM_CX
22605+ jb 1234f
22606+ xor %ebx,%ebx
22607+1234:
22608+#endif
22609+
22610+#endif
22611+
22612+1: __copyuser_seg movb %al,(_DEST)
22613 xor %eax,%eax
22614 EXIT
22615 ENDPROC(__put_user_1)
22616
22617 ENTRY(__put_user_2)
22618 ENTER
22619+
22620+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22621+ GET_THREAD_INFO(%_ASM_BX)
22622 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22623 sub $1,%_ASM_BX
22624 cmp %_ASM_BX,%_ASM_CX
22625 jae bad_put_user
22626-2: movw %ax,(%_ASM_CX)
22627+
22628+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22629+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22630+ cmp %_ASM_BX,%_ASM_CX
22631+ jb 1234f
22632+ xor %ebx,%ebx
22633+1234:
22634+#endif
22635+
22636+#endif
22637+
22638+2: __copyuser_seg movw %ax,(_DEST)
22639 xor %eax,%eax
22640 EXIT
22641 ENDPROC(__put_user_2)
22642
22643 ENTRY(__put_user_4)
22644 ENTER
22645+
22646+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22647+ GET_THREAD_INFO(%_ASM_BX)
22648 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22649 sub $3,%_ASM_BX
22650 cmp %_ASM_BX,%_ASM_CX
22651 jae bad_put_user
22652-3: movl %eax,(%_ASM_CX)
22653+
22654+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22655+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22656+ cmp %_ASM_BX,%_ASM_CX
22657+ jb 1234f
22658+ xor %ebx,%ebx
22659+1234:
22660+#endif
22661+
22662+#endif
22663+
22664+3: __copyuser_seg movl %eax,(_DEST)
22665 xor %eax,%eax
22666 EXIT
22667 ENDPROC(__put_user_4)
22668
22669 ENTRY(__put_user_8)
22670 ENTER
22671+
22672+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22673+ GET_THREAD_INFO(%_ASM_BX)
22674 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22675 sub $7,%_ASM_BX
22676 cmp %_ASM_BX,%_ASM_CX
22677 jae bad_put_user
22678-4: mov %_ASM_AX,(%_ASM_CX)
22679+
22680+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22681+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22682+ cmp %_ASM_BX,%_ASM_CX
22683+ jb 1234f
22684+ xor %ebx,%ebx
22685+1234:
22686+#endif
22687+
22688+#endif
22689+
22690+4: __copyuser_seg mov %_ASM_AX,(_DEST)
22691 #ifdef CONFIG_X86_32
22692-5: movl %edx,4(%_ASM_CX)
22693+5: __copyuser_seg movl %edx,4(_DEST)
22694 #endif
22695 xor %eax,%eax
22696 EXIT
22697diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
22698index 1cad221..de671ee 100644
22699--- a/arch/x86/lib/rwlock.S
22700+++ b/arch/x86/lib/rwlock.S
22701@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
22702 FRAME
22703 0: LOCK_PREFIX
22704 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22705+
22706+#ifdef CONFIG_PAX_REFCOUNT
22707+ jno 1234f
22708+ LOCK_PREFIX
22709+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22710+ int $4
22711+1234:
22712+ _ASM_EXTABLE(1234b, 1234b)
22713+#endif
22714+
22715 1: rep; nop
22716 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
22717 jne 1b
22718 LOCK_PREFIX
22719 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22720+
22721+#ifdef CONFIG_PAX_REFCOUNT
22722+ jno 1234f
22723+ LOCK_PREFIX
22724+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22725+ int $4
22726+1234:
22727+ _ASM_EXTABLE(1234b, 1234b)
22728+#endif
22729+
22730 jnz 0b
22731 ENDFRAME
22732+ pax_force_retaddr
22733 ret
22734 CFI_ENDPROC
22735 END(__write_lock_failed)
22736@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
22737 FRAME
22738 0: LOCK_PREFIX
22739 READ_LOCK_SIZE(inc) (%__lock_ptr)
22740+
22741+#ifdef CONFIG_PAX_REFCOUNT
22742+ jno 1234f
22743+ LOCK_PREFIX
22744+ READ_LOCK_SIZE(dec) (%__lock_ptr)
22745+ int $4
22746+1234:
22747+ _ASM_EXTABLE(1234b, 1234b)
22748+#endif
22749+
22750 1: rep; nop
22751 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
22752 js 1b
22753 LOCK_PREFIX
22754 READ_LOCK_SIZE(dec) (%__lock_ptr)
22755+
22756+#ifdef CONFIG_PAX_REFCOUNT
22757+ jno 1234f
22758+ LOCK_PREFIX
22759+ READ_LOCK_SIZE(inc) (%__lock_ptr)
22760+ int $4
22761+1234:
22762+ _ASM_EXTABLE(1234b, 1234b)
22763+#endif
22764+
22765 js 0b
22766 ENDFRAME
22767+ pax_force_retaddr
22768 ret
22769 CFI_ENDPROC
22770 END(__read_lock_failed)
22771diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
22772index 5dff5f0..cadebf4 100644
22773--- a/arch/x86/lib/rwsem.S
22774+++ b/arch/x86/lib/rwsem.S
22775@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
22776 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22777 CFI_RESTORE __ASM_REG(dx)
22778 restore_common_regs
22779+ pax_force_retaddr
22780 ret
22781 CFI_ENDPROC
22782 ENDPROC(call_rwsem_down_read_failed)
22783@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
22784 movq %rax,%rdi
22785 call rwsem_down_write_failed
22786 restore_common_regs
22787+ pax_force_retaddr
22788 ret
22789 CFI_ENDPROC
22790 ENDPROC(call_rwsem_down_write_failed)
22791@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
22792 movq %rax,%rdi
22793 call rwsem_wake
22794 restore_common_regs
22795-1: ret
22796+1: pax_force_retaddr
22797+ ret
22798 CFI_ENDPROC
22799 ENDPROC(call_rwsem_wake)
22800
22801@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
22802 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22803 CFI_RESTORE __ASM_REG(dx)
22804 restore_common_regs
22805+ pax_force_retaddr
22806 ret
22807 CFI_ENDPROC
22808 ENDPROC(call_rwsem_downgrade_wake)
22809diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22810index a63efd6..ccecad8 100644
22811--- a/arch/x86/lib/thunk_64.S
22812+++ b/arch/x86/lib/thunk_64.S
22813@@ -8,6 +8,7 @@
22814 #include <linux/linkage.h>
22815 #include <asm/dwarf2.h>
22816 #include <asm/calling.h>
22817+#include <asm/alternative-asm.h>
22818
22819 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22820 .macro THUNK name, func, put_ret_addr_in_rdi=0
22821@@ -41,5 +42,6 @@
22822 SAVE_ARGS
22823 restore:
22824 RESTORE_ARGS
22825+ pax_force_retaddr
22826 ret
22827 CFI_ENDPROC
22828diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
22829index ef2a6a5..3b28862 100644
22830--- a/arch/x86/lib/usercopy_32.c
22831+++ b/arch/x86/lib/usercopy_32.c
22832@@ -41,10 +41,12 @@ do { \
22833 int __d0; \
22834 might_fault(); \
22835 __asm__ __volatile__( \
22836+ __COPYUSER_SET_ES \
22837 "0: rep; stosl\n" \
22838 " movl %2,%0\n" \
22839 "1: rep; stosb\n" \
22840 "2:\n" \
22841+ __COPYUSER_RESTORE_ES \
22842 ".section .fixup,\"ax\"\n" \
22843 "3: lea 0(%2,%0,4),%0\n" \
22844 " jmp 2b\n" \
22845@@ -113,6 +115,7 @@ long strnlen_user(const char __user *s, long n)
22846 might_fault();
22847
22848 __asm__ __volatile__(
22849+ __COPYUSER_SET_ES
22850 " testl %0, %0\n"
22851 " jz 3f\n"
22852 " andl %0,%%ecx\n"
22853@@ -121,6 +124,7 @@ long strnlen_user(const char __user *s, long n)
22854 " subl %%ecx,%0\n"
22855 " addl %0,%%eax\n"
22856 "1:\n"
22857+ __COPYUSER_RESTORE_ES
22858 ".section .fixup,\"ax\"\n"
22859 "2: xorl %%eax,%%eax\n"
22860 " jmp 1b\n"
22861@@ -140,7 +144,7 @@ EXPORT_SYMBOL(strnlen_user);
22862
22863 #ifdef CONFIG_X86_INTEL_USERCOPY
22864 static unsigned long
22865-__copy_user_intel(void __user *to, const void *from, unsigned long size)
22866+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
22867 {
22868 int d0, d1;
22869 __asm__ __volatile__(
22870@@ -152,36 +156,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22871 " .align 2,0x90\n"
22872 "3: movl 0(%4), %%eax\n"
22873 "4: movl 4(%4), %%edx\n"
22874- "5: movl %%eax, 0(%3)\n"
22875- "6: movl %%edx, 4(%3)\n"
22876+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
22877+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
22878 "7: movl 8(%4), %%eax\n"
22879 "8: movl 12(%4),%%edx\n"
22880- "9: movl %%eax, 8(%3)\n"
22881- "10: movl %%edx, 12(%3)\n"
22882+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
22883+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
22884 "11: movl 16(%4), %%eax\n"
22885 "12: movl 20(%4), %%edx\n"
22886- "13: movl %%eax, 16(%3)\n"
22887- "14: movl %%edx, 20(%3)\n"
22888+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
22889+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
22890 "15: movl 24(%4), %%eax\n"
22891 "16: movl 28(%4), %%edx\n"
22892- "17: movl %%eax, 24(%3)\n"
22893- "18: movl %%edx, 28(%3)\n"
22894+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
22895+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
22896 "19: movl 32(%4), %%eax\n"
22897 "20: movl 36(%4), %%edx\n"
22898- "21: movl %%eax, 32(%3)\n"
22899- "22: movl %%edx, 36(%3)\n"
22900+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
22901+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
22902 "23: movl 40(%4), %%eax\n"
22903 "24: movl 44(%4), %%edx\n"
22904- "25: movl %%eax, 40(%3)\n"
22905- "26: movl %%edx, 44(%3)\n"
22906+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
22907+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
22908 "27: movl 48(%4), %%eax\n"
22909 "28: movl 52(%4), %%edx\n"
22910- "29: movl %%eax, 48(%3)\n"
22911- "30: movl %%edx, 52(%3)\n"
22912+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
22913+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
22914 "31: movl 56(%4), %%eax\n"
22915 "32: movl 60(%4), %%edx\n"
22916- "33: movl %%eax, 56(%3)\n"
22917- "34: movl %%edx, 60(%3)\n"
22918+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
22919+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
22920 " addl $-64, %0\n"
22921 " addl $64, %4\n"
22922 " addl $64, %3\n"
22923@@ -191,10 +195,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22924 " shrl $2, %0\n"
22925 " andl $3, %%eax\n"
22926 " cld\n"
22927+ __COPYUSER_SET_ES
22928 "99: rep; movsl\n"
22929 "36: movl %%eax, %0\n"
22930 "37: rep; movsb\n"
22931 "100:\n"
22932+ __COPYUSER_RESTORE_ES
22933 ".section .fixup,\"ax\"\n"
22934 "101: lea 0(%%eax,%0,4),%0\n"
22935 " jmp 100b\n"
22936@@ -247,46 +253,155 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22937 }
22938
22939 static unsigned long
22940+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
22941+{
22942+ int d0, d1;
22943+ __asm__ __volatile__(
22944+ " .align 2,0x90\n"
22945+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
22946+ " cmpl $67, %0\n"
22947+ " jbe 3f\n"
22948+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
22949+ " .align 2,0x90\n"
22950+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
22951+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
22952+ "5: movl %%eax, 0(%3)\n"
22953+ "6: movl %%edx, 4(%3)\n"
22954+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
22955+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
22956+ "9: movl %%eax, 8(%3)\n"
22957+ "10: movl %%edx, 12(%3)\n"
22958+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
22959+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
22960+ "13: movl %%eax, 16(%3)\n"
22961+ "14: movl %%edx, 20(%3)\n"
22962+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
22963+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
22964+ "17: movl %%eax, 24(%3)\n"
22965+ "18: movl %%edx, 28(%3)\n"
22966+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
22967+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
22968+ "21: movl %%eax, 32(%3)\n"
22969+ "22: movl %%edx, 36(%3)\n"
22970+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
22971+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
22972+ "25: movl %%eax, 40(%3)\n"
22973+ "26: movl %%edx, 44(%3)\n"
22974+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
22975+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
22976+ "29: movl %%eax, 48(%3)\n"
22977+ "30: movl %%edx, 52(%3)\n"
22978+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
22979+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
22980+ "33: movl %%eax, 56(%3)\n"
22981+ "34: movl %%edx, 60(%3)\n"
22982+ " addl $-64, %0\n"
22983+ " addl $64, %4\n"
22984+ " addl $64, %3\n"
22985+ " cmpl $63, %0\n"
22986+ " ja 1b\n"
22987+ "35: movl %0, %%eax\n"
22988+ " shrl $2, %0\n"
22989+ " andl $3, %%eax\n"
22990+ " cld\n"
22991+ "99: rep; "__copyuser_seg" movsl\n"
22992+ "36: movl %%eax, %0\n"
22993+ "37: rep; "__copyuser_seg" movsb\n"
22994+ "100:\n"
22995+ ".section .fixup,\"ax\"\n"
22996+ "101: lea 0(%%eax,%0,4),%0\n"
22997+ " jmp 100b\n"
22998+ ".previous\n"
22999+ ".section __ex_table,\"a\"\n"
23000+ " .align 4\n"
23001+ " .long 1b,100b\n"
23002+ " .long 2b,100b\n"
23003+ " .long 3b,100b\n"
23004+ " .long 4b,100b\n"
23005+ " .long 5b,100b\n"
23006+ " .long 6b,100b\n"
23007+ " .long 7b,100b\n"
23008+ " .long 8b,100b\n"
23009+ " .long 9b,100b\n"
23010+ " .long 10b,100b\n"
23011+ " .long 11b,100b\n"
23012+ " .long 12b,100b\n"
23013+ " .long 13b,100b\n"
23014+ " .long 14b,100b\n"
23015+ " .long 15b,100b\n"
23016+ " .long 16b,100b\n"
23017+ " .long 17b,100b\n"
23018+ " .long 18b,100b\n"
23019+ " .long 19b,100b\n"
23020+ " .long 20b,100b\n"
23021+ " .long 21b,100b\n"
23022+ " .long 22b,100b\n"
23023+ " .long 23b,100b\n"
23024+ " .long 24b,100b\n"
23025+ " .long 25b,100b\n"
23026+ " .long 26b,100b\n"
23027+ " .long 27b,100b\n"
23028+ " .long 28b,100b\n"
23029+ " .long 29b,100b\n"
23030+ " .long 30b,100b\n"
23031+ " .long 31b,100b\n"
23032+ " .long 32b,100b\n"
23033+ " .long 33b,100b\n"
23034+ " .long 34b,100b\n"
23035+ " .long 35b,100b\n"
23036+ " .long 36b,100b\n"
23037+ " .long 37b,100b\n"
23038+ " .long 99b,101b\n"
23039+ ".previous"
23040+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
23041+ : "1"(to), "2"(from), "0"(size)
23042+ : "eax", "edx", "memory");
23043+ return size;
23044+}
23045+
23046+static unsigned long
23047+__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
23048+static unsigned long
23049 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23050 {
23051 int d0, d1;
23052 __asm__ __volatile__(
23053 " .align 2,0x90\n"
23054- "0: movl 32(%4), %%eax\n"
23055+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23056 " cmpl $67, %0\n"
23057 " jbe 2f\n"
23058- "1: movl 64(%4), %%eax\n"
23059+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23060 " .align 2,0x90\n"
23061- "2: movl 0(%4), %%eax\n"
23062- "21: movl 4(%4), %%edx\n"
23063+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23064+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23065 " movl %%eax, 0(%3)\n"
23066 " movl %%edx, 4(%3)\n"
23067- "3: movl 8(%4), %%eax\n"
23068- "31: movl 12(%4),%%edx\n"
23069+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23070+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23071 " movl %%eax, 8(%3)\n"
23072 " movl %%edx, 12(%3)\n"
23073- "4: movl 16(%4), %%eax\n"
23074- "41: movl 20(%4), %%edx\n"
23075+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23076+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23077 " movl %%eax, 16(%3)\n"
23078 " movl %%edx, 20(%3)\n"
23079- "10: movl 24(%4), %%eax\n"
23080- "51: movl 28(%4), %%edx\n"
23081+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23082+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23083 " movl %%eax, 24(%3)\n"
23084 " movl %%edx, 28(%3)\n"
23085- "11: movl 32(%4), %%eax\n"
23086- "61: movl 36(%4), %%edx\n"
23087+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23088+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23089 " movl %%eax, 32(%3)\n"
23090 " movl %%edx, 36(%3)\n"
23091- "12: movl 40(%4), %%eax\n"
23092- "71: movl 44(%4), %%edx\n"
23093+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23094+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23095 " movl %%eax, 40(%3)\n"
23096 " movl %%edx, 44(%3)\n"
23097- "13: movl 48(%4), %%eax\n"
23098- "81: movl 52(%4), %%edx\n"
23099+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23100+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23101 " movl %%eax, 48(%3)\n"
23102 " movl %%edx, 52(%3)\n"
23103- "14: movl 56(%4), %%eax\n"
23104- "91: movl 60(%4), %%edx\n"
23105+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23106+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23107 " movl %%eax, 56(%3)\n"
23108 " movl %%edx, 60(%3)\n"
23109 " addl $-64, %0\n"
23110@@ -298,9 +413,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23111 " shrl $2, %0\n"
23112 " andl $3, %%eax\n"
23113 " cld\n"
23114- "6: rep; movsl\n"
23115+ "6: rep; "__copyuser_seg" movsl\n"
23116 " movl %%eax,%0\n"
23117- "7: rep; movsb\n"
23118+ "7: rep; "__copyuser_seg" movsb\n"
23119 "8:\n"
23120 ".section .fixup,\"ax\"\n"
23121 "9: lea 0(%%eax,%0,4),%0\n"
23122@@ -347,47 +462,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23123 */
23124
23125 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23126+ const void __user *from, unsigned long size) __size_overflow(3);
23127+static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23128 const void __user *from, unsigned long size)
23129 {
23130 int d0, d1;
23131
23132 __asm__ __volatile__(
23133 " .align 2,0x90\n"
23134- "0: movl 32(%4), %%eax\n"
23135+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23136 " cmpl $67, %0\n"
23137 " jbe 2f\n"
23138- "1: movl 64(%4), %%eax\n"
23139+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23140 " .align 2,0x90\n"
23141- "2: movl 0(%4), %%eax\n"
23142- "21: movl 4(%4), %%edx\n"
23143+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23144+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23145 " movnti %%eax, 0(%3)\n"
23146 " movnti %%edx, 4(%3)\n"
23147- "3: movl 8(%4), %%eax\n"
23148- "31: movl 12(%4),%%edx\n"
23149+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23150+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23151 " movnti %%eax, 8(%3)\n"
23152 " movnti %%edx, 12(%3)\n"
23153- "4: movl 16(%4), %%eax\n"
23154- "41: movl 20(%4), %%edx\n"
23155+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23156+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23157 " movnti %%eax, 16(%3)\n"
23158 " movnti %%edx, 20(%3)\n"
23159- "10: movl 24(%4), %%eax\n"
23160- "51: movl 28(%4), %%edx\n"
23161+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23162+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23163 " movnti %%eax, 24(%3)\n"
23164 " movnti %%edx, 28(%3)\n"
23165- "11: movl 32(%4), %%eax\n"
23166- "61: movl 36(%4), %%edx\n"
23167+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23168+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23169 " movnti %%eax, 32(%3)\n"
23170 " movnti %%edx, 36(%3)\n"
23171- "12: movl 40(%4), %%eax\n"
23172- "71: movl 44(%4), %%edx\n"
23173+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23174+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23175 " movnti %%eax, 40(%3)\n"
23176 " movnti %%edx, 44(%3)\n"
23177- "13: movl 48(%4), %%eax\n"
23178- "81: movl 52(%4), %%edx\n"
23179+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23180+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23181 " movnti %%eax, 48(%3)\n"
23182 " movnti %%edx, 52(%3)\n"
23183- "14: movl 56(%4), %%eax\n"
23184- "91: movl 60(%4), %%edx\n"
23185+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23186+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23187 " movnti %%eax, 56(%3)\n"
23188 " movnti %%edx, 60(%3)\n"
23189 " addl $-64, %0\n"
23190@@ -400,9 +517,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23191 " shrl $2, %0\n"
23192 " andl $3, %%eax\n"
23193 " cld\n"
23194- "6: rep; movsl\n"
23195+ "6: rep; "__copyuser_seg" movsl\n"
23196 " movl %%eax,%0\n"
23197- "7: rep; movsb\n"
23198+ "7: rep; "__copyuser_seg" movsb\n"
23199 "8:\n"
23200 ".section .fixup,\"ax\"\n"
23201 "9: lea 0(%%eax,%0,4),%0\n"
23202@@ -444,47 +561,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23203 }
23204
23205 static unsigned long __copy_user_intel_nocache(void *to,
23206+ const void __user *from, unsigned long size) __size_overflow(3);
23207+static unsigned long __copy_user_intel_nocache(void *to,
23208 const void __user *from, unsigned long size)
23209 {
23210 int d0, d1;
23211
23212 __asm__ __volatile__(
23213 " .align 2,0x90\n"
23214- "0: movl 32(%4), %%eax\n"
23215+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23216 " cmpl $67, %0\n"
23217 " jbe 2f\n"
23218- "1: movl 64(%4), %%eax\n"
23219+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23220 " .align 2,0x90\n"
23221- "2: movl 0(%4), %%eax\n"
23222- "21: movl 4(%4), %%edx\n"
23223+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23224+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23225 " movnti %%eax, 0(%3)\n"
23226 " movnti %%edx, 4(%3)\n"
23227- "3: movl 8(%4), %%eax\n"
23228- "31: movl 12(%4),%%edx\n"
23229+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23230+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23231 " movnti %%eax, 8(%3)\n"
23232 " movnti %%edx, 12(%3)\n"
23233- "4: movl 16(%4), %%eax\n"
23234- "41: movl 20(%4), %%edx\n"
23235+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23236+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23237 " movnti %%eax, 16(%3)\n"
23238 " movnti %%edx, 20(%3)\n"
23239- "10: movl 24(%4), %%eax\n"
23240- "51: movl 28(%4), %%edx\n"
23241+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23242+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23243 " movnti %%eax, 24(%3)\n"
23244 " movnti %%edx, 28(%3)\n"
23245- "11: movl 32(%4), %%eax\n"
23246- "61: movl 36(%4), %%edx\n"
23247+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23248+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23249 " movnti %%eax, 32(%3)\n"
23250 " movnti %%edx, 36(%3)\n"
23251- "12: movl 40(%4), %%eax\n"
23252- "71: movl 44(%4), %%edx\n"
23253+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23254+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23255 " movnti %%eax, 40(%3)\n"
23256 " movnti %%edx, 44(%3)\n"
23257- "13: movl 48(%4), %%eax\n"
23258- "81: movl 52(%4), %%edx\n"
23259+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23260+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23261 " movnti %%eax, 48(%3)\n"
23262 " movnti %%edx, 52(%3)\n"
23263- "14: movl 56(%4), %%eax\n"
23264- "91: movl 60(%4), %%edx\n"
23265+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23266+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23267 " movnti %%eax, 56(%3)\n"
23268 " movnti %%edx, 60(%3)\n"
23269 " addl $-64, %0\n"
23270@@ -497,9 +616,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23271 " shrl $2, %0\n"
23272 " andl $3, %%eax\n"
23273 " cld\n"
23274- "6: rep; movsl\n"
23275+ "6: rep; "__copyuser_seg" movsl\n"
23276 " movl %%eax,%0\n"
23277- "7: rep; movsb\n"
23278+ "7: rep; "__copyuser_seg" movsb\n"
23279 "8:\n"
23280 ".section .fixup,\"ax\"\n"
23281 "9: lea 0(%%eax,%0,4),%0\n"
23282@@ -542,32 +661,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23283 */
23284 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23285 unsigned long size);
23286-unsigned long __copy_user_intel(void __user *to, const void *from,
23287+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23288+ unsigned long size);
23289+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23290 unsigned long size);
23291 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23292 const void __user *from, unsigned long size);
23293 #endif /* CONFIG_X86_INTEL_USERCOPY */
23294
23295 /* Generic arbitrary sized copy. */
23296-#define __copy_user(to, from, size) \
23297+#define __copy_user(to, from, size, prefix, set, restore) \
23298 do { \
23299 int __d0, __d1, __d2; \
23300 __asm__ __volatile__( \
23301+ set \
23302 " cmp $7,%0\n" \
23303 " jbe 1f\n" \
23304 " movl %1,%0\n" \
23305 " negl %0\n" \
23306 " andl $7,%0\n" \
23307 " subl %0,%3\n" \
23308- "4: rep; movsb\n" \
23309+ "4: rep; "prefix"movsb\n" \
23310 " movl %3,%0\n" \
23311 " shrl $2,%0\n" \
23312 " andl $3,%3\n" \
23313 " .align 2,0x90\n" \
23314- "0: rep; movsl\n" \
23315+ "0: rep; "prefix"movsl\n" \
23316 " movl %3,%0\n" \
23317- "1: rep; movsb\n" \
23318+ "1: rep; "prefix"movsb\n" \
23319 "2:\n" \
23320+ restore \
23321 ".section .fixup,\"ax\"\n" \
23322 "5: addl %3,%0\n" \
23323 " jmp 2b\n" \
23324@@ -595,14 +718,14 @@ do { \
23325 " negl %0\n" \
23326 " andl $7,%0\n" \
23327 " subl %0,%3\n" \
23328- "4: rep; movsb\n" \
23329+ "4: rep; "__copyuser_seg"movsb\n" \
23330 " movl %3,%0\n" \
23331 " shrl $2,%0\n" \
23332 " andl $3,%3\n" \
23333 " .align 2,0x90\n" \
23334- "0: rep; movsl\n" \
23335+ "0: rep; "__copyuser_seg"movsl\n" \
23336 " movl %3,%0\n" \
23337- "1: rep; movsb\n" \
23338+ "1: rep; "__copyuser_seg"movsb\n" \
23339 "2:\n" \
23340 ".section .fixup,\"ax\"\n" \
23341 "5: addl %3,%0\n" \
23342@@ -688,9 +811,9 @@ survive:
23343 }
23344 #endif
23345 if (movsl_is_ok(to, from, n))
23346- __copy_user(to, from, n);
23347+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23348 else
23349- n = __copy_user_intel(to, from, n);
23350+ n = __generic_copy_to_user_intel(to, from, n);
23351 return n;
23352 }
23353 EXPORT_SYMBOL(__copy_to_user_ll);
23354@@ -710,10 +833,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23355 unsigned long n)
23356 {
23357 if (movsl_is_ok(to, from, n))
23358- __copy_user(to, from, n);
23359+ __copy_user(to, from, n, __copyuser_seg, "", "");
23360 else
23361- n = __copy_user_intel((void __user *)to,
23362- (const void *)from, n);
23363+ n = __generic_copy_from_user_intel(to, from, n);
23364 return n;
23365 }
23366 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23367@@ -740,65 +862,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23368 if (n > 64 && cpu_has_xmm2)
23369 n = __copy_user_intel_nocache(to, from, n);
23370 else
23371- __copy_user(to, from, n);
23372+ __copy_user(to, from, n, __copyuser_seg, "", "");
23373 #else
23374- __copy_user(to, from, n);
23375+ __copy_user(to, from, n, __copyuser_seg, "", "");
23376 #endif
23377 return n;
23378 }
23379 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23380
23381-/**
23382- * copy_to_user: - Copy a block of data into user space.
23383- * @to: Destination address, in user space.
23384- * @from: Source address, in kernel space.
23385- * @n: Number of bytes to copy.
23386- *
23387- * Context: User context only. This function may sleep.
23388- *
23389- * Copy data from kernel space to user space.
23390- *
23391- * Returns number of bytes that could not be copied.
23392- * On success, this will be zero.
23393- */
23394-unsigned long
23395-copy_to_user(void __user *to, const void *from, unsigned long n)
23396-{
23397- if (access_ok(VERIFY_WRITE, to, n))
23398- n = __copy_to_user(to, from, n);
23399- return n;
23400-}
23401-EXPORT_SYMBOL(copy_to_user);
23402-
23403-/**
23404- * copy_from_user: - Copy a block of data from user space.
23405- * @to: Destination address, in kernel space.
23406- * @from: Source address, in user space.
23407- * @n: Number of bytes to copy.
23408- *
23409- * Context: User context only. This function may sleep.
23410- *
23411- * Copy data from user space to kernel space.
23412- *
23413- * Returns number of bytes that could not be copied.
23414- * On success, this will be zero.
23415- *
23416- * If some data could not be copied, this function will pad the copied
23417- * data to the requested size using zero bytes.
23418- */
23419-unsigned long
23420-_copy_from_user(void *to, const void __user *from, unsigned long n)
23421-{
23422- if (access_ok(VERIFY_READ, from, n))
23423- n = __copy_from_user(to, from, n);
23424- else
23425- memset(to, 0, n);
23426- return n;
23427-}
23428-EXPORT_SYMBOL(_copy_from_user);
23429-
23430 void copy_from_user_overflow(void)
23431 {
23432 WARN(1, "Buffer overflow detected!\n");
23433 }
23434 EXPORT_SYMBOL(copy_from_user_overflow);
23435+
23436+void copy_to_user_overflow(void)
23437+{
23438+ WARN(1, "Buffer overflow detected!\n");
23439+}
23440+EXPORT_SYMBOL(copy_to_user_overflow);
23441+
23442+#ifdef CONFIG_PAX_MEMORY_UDEREF
23443+void __set_fs(mm_segment_t x)
23444+{
23445+ switch (x.seg) {
23446+ case 0:
23447+ loadsegment(gs, 0);
23448+ break;
23449+ case TASK_SIZE_MAX:
23450+ loadsegment(gs, __USER_DS);
23451+ break;
23452+ case -1UL:
23453+ loadsegment(gs, __KERNEL_DS);
23454+ break;
23455+ default:
23456+ BUG();
23457+ }
23458+ return;
23459+}
23460+EXPORT_SYMBOL(__set_fs);
23461+
23462+void set_fs(mm_segment_t x)
23463+{
23464+ current_thread_info()->addr_limit = x;
23465+ __set_fs(x);
23466+}
23467+EXPORT_SYMBOL(set_fs);
23468+#endif
23469diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23470index 0d0326f..6a6155b 100644
23471--- a/arch/x86/lib/usercopy_64.c
23472+++ b/arch/x86/lib/usercopy_64.c
23473@@ -16,6 +16,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23474 {
23475 long __d0;
23476 might_fault();
23477+
23478+#ifdef CONFIG_PAX_MEMORY_UDEREF
23479+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23480+ addr += PAX_USER_SHADOW_BASE;
23481+#endif
23482+
23483 /* no memory constraint because it doesn't change any memory gcc knows
23484 about */
23485 asm volatile(
23486@@ -100,12 +106,20 @@ long strlen_user(const char __user *s)
23487 }
23488 EXPORT_SYMBOL(strlen_user);
23489
23490-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23491+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23492 {
23493- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23494- return copy_user_generic((__force void *)to, (__force void *)from, len);
23495- }
23496- return len;
23497+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23498+
23499+#ifdef CONFIG_PAX_MEMORY_UDEREF
23500+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23501+ to += PAX_USER_SHADOW_BASE;
23502+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23503+ from += PAX_USER_SHADOW_BASE;
23504+#endif
23505+
23506+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23507+ }
23508+ return len;
23509 }
23510 EXPORT_SYMBOL(copy_in_user);
23511
23512@@ -115,7 +129,7 @@ EXPORT_SYMBOL(copy_in_user);
23513 * it is not necessary to optimize tail handling.
23514 */
23515 unsigned long
23516-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23517+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23518 {
23519 char c;
23520 unsigned zero_len;
23521@@ -132,3 +146,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23522 break;
23523 return len;
23524 }
23525+
23526+void copy_from_user_overflow(void)
23527+{
23528+ WARN(1, "Buffer overflow detected!\n");
23529+}
23530+EXPORT_SYMBOL(copy_from_user_overflow);
23531+
23532+void copy_to_user_overflow(void)
23533+{
23534+ WARN(1, "Buffer overflow detected!\n");
23535+}
23536+EXPORT_SYMBOL(copy_to_user_overflow);
23537diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23538index 1fb85db..8b3540b 100644
23539--- a/arch/x86/mm/extable.c
23540+++ b/arch/x86/mm/extable.c
23541@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
23542 const struct exception_table_entry *fixup;
23543
23544 #ifdef CONFIG_PNPBIOS
23545- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23546+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23547 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23548 extern u32 pnp_bios_is_utter_crap;
23549 pnp_bios_is_utter_crap = 1;
23550diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23551index 3ecfd1a..304d554 100644
23552--- a/arch/x86/mm/fault.c
23553+++ b/arch/x86/mm/fault.c
23554@@ -13,11 +13,18 @@
23555 #include <linux/perf_event.h> /* perf_sw_event */
23556 #include <linux/hugetlb.h> /* hstate_index_to_shift */
23557 #include <linux/prefetch.h> /* prefetchw */
23558+#include <linux/unistd.h>
23559+#include <linux/compiler.h>
23560
23561 #include <asm/traps.h> /* dotraplinkage, ... */
23562 #include <asm/pgalloc.h> /* pgd_*(), ... */
23563 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23564 #include <asm/fixmap.h> /* VSYSCALL_START */
23565+#include <asm/tlbflush.h>
23566+
23567+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23568+#include <asm/stacktrace.h>
23569+#endif
23570
23571 /*
23572 * Page fault error code bits:
23573@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
23574 int ret = 0;
23575
23576 /* kprobe_running() needs smp_processor_id() */
23577- if (kprobes_built_in() && !user_mode_vm(regs)) {
23578+ if (kprobes_built_in() && !user_mode(regs)) {
23579 preempt_disable();
23580 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23581 ret = 1;
23582@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23583 return !instr_lo || (instr_lo>>1) == 1;
23584 case 0x00:
23585 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23586- if (probe_kernel_address(instr, opcode))
23587+ if (user_mode(regs)) {
23588+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23589+ return 0;
23590+ } else if (probe_kernel_address(instr, opcode))
23591 return 0;
23592
23593 *prefetch = (instr_lo == 0xF) &&
23594@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23595 while (instr < max_instr) {
23596 unsigned char opcode;
23597
23598- if (probe_kernel_address(instr, opcode))
23599+ if (user_mode(regs)) {
23600+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23601+ break;
23602+ } else if (probe_kernel_address(instr, opcode))
23603 break;
23604
23605 instr++;
23606@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23607 force_sig_info(si_signo, &info, tsk);
23608 }
23609
23610+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23611+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23612+#endif
23613+
23614+#ifdef CONFIG_PAX_EMUTRAMP
23615+static int pax_handle_fetch_fault(struct pt_regs *regs);
23616+#endif
23617+
23618+#ifdef CONFIG_PAX_PAGEEXEC
23619+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23620+{
23621+ pgd_t *pgd;
23622+ pud_t *pud;
23623+ pmd_t *pmd;
23624+
23625+ pgd = pgd_offset(mm, address);
23626+ if (!pgd_present(*pgd))
23627+ return NULL;
23628+ pud = pud_offset(pgd, address);
23629+ if (!pud_present(*pud))
23630+ return NULL;
23631+ pmd = pmd_offset(pud, address);
23632+ if (!pmd_present(*pmd))
23633+ return NULL;
23634+ return pmd;
23635+}
23636+#endif
23637+
23638 DEFINE_SPINLOCK(pgd_lock);
23639 LIST_HEAD(pgd_list);
23640
23641@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
23642 for (address = VMALLOC_START & PMD_MASK;
23643 address >= TASK_SIZE && address < FIXADDR_TOP;
23644 address += PMD_SIZE) {
23645+
23646+#ifdef CONFIG_PAX_PER_CPU_PGD
23647+ unsigned long cpu;
23648+#else
23649 struct page *page;
23650+#endif
23651
23652 spin_lock(&pgd_lock);
23653+
23654+#ifdef CONFIG_PAX_PER_CPU_PGD
23655+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23656+ pgd_t *pgd = get_cpu_pgd(cpu);
23657+ pmd_t *ret;
23658+#else
23659 list_for_each_entry(page, &pgd_list, lru) {
23660+ pgd_t *pgd = page_address(page);
23661 spinlock_t *pgt_lock;
23662 pmd_t *ret;
23663
23664@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
23665 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23666
23667 spin_lock(pgt_lock);
23668- ret = vmalloc_sync_one(page_address(page), address);
23669+#endif
23670+
23671+ ret = vmalloc_sync_one(pgd, address);
23672+
23673+#ifndef CONFIG_PAX_PER_CPU_PGD
23674 spin_unlock(pgt_lock);
23675+#endif
23676
23677 if (!ret)
23678 break;
23679@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23680 * an interrupt in the middle of a task switch..
23681 */
23682 pgd_paddr = read_cr3();
23683+
23684+#ifdef CONFIG_PAX_PER_CPU_PGD
23685+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23686+#endif
23687+
23688 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23689 if (!pmd_k)
23690 return -1;
23691@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23692 * happen within a race in page table update. In the later
23693 * case just flush:
23694 */
23695+
23696+#ifdef CONFIG_PAX_PER_CPU_PGD
23697+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23698+ pgd = pgd_offset_cpu(smp_processor_id(), address);
23699+#else
23700 pgd = pgd_offset(current->active_mm, address);
23701+#endif
23702+
23703 pgd_ref = pgd_offset_k(address);
23704 if (pgd_none(*pgd_ref))
23705 return -1;
23706@@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23707 static int is_errata100(struct pt_regs *regs, unsigned long address)
23708 {
23709 #ifdef CONFIG_X86_64
23710- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23711+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23712 return 1;
23713 #endif
23714 return 0;
23715@@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23716 }
23717
23718 static const char nx_warning[] = KERN_CRIT
23719-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23720+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23721
23722 static void
23723 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23724@@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23725 if (!oops_may_print())
23726 return;
23727
23728- if (error_code & PF_INSTR) {
23729+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
23730 unsigned int level;
23731
23732 pte_t *pte = lookup_address(address, &level);
23733
23734 if (pte && pte_present(*pte) && !pte_exec(*pte))
23735- printk(nx_warning, current_uid());
23736+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
23737 }
23738
23739+#ifdef CONFIG_PAX_KERNEXEC
23740+ if (init_mm.start_code <= address && address < init_mm.end_code) {
23741+ if (current->signal->curr_ip)
23742+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23743+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23744+ else
23745+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23746+ current->comm, task_pid_nr(current), current_uid(), current_euid());
23747+ }
23748+#endif
23749+
23750 printk(KERN_ALERT "BUG: unable to handle kernel ");
23751 if (address < PAGE_SIZE)
23752 printk(KERN_CONT "NULL pointer dereference");
23753@@ -748,6 +829,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23754 }
23755 #endif
23756
23757+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23758+ if (pax_is_fetch_fault(regs, error_code, address)) {
23759+
23760+#ifdef CONFIG_PAX_EMUTRAMP
23761+ switch (pax_handle_fetch_fault(regs)) {
23762+ case 2:
23763+ return;
23764+ }
23765+#endif
23766+
23767+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23768+ do_group_exit(SIGKILL);
23769+ }
23770+#endif
23771+
23772 if (unlikely(show_unhandled_signals))
23773 show_signal_msg(regs, error_code, address, tsk);
23774
23775@@ -844,7 +940,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
23776 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
23777 printk(KERN_ERR
23778 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23779- tsk->comm, tsk->pid, address);
23780+ tsk->comm, task_pid_nr(tsk), address);
23781 code = BUS_MCEERR_AR;
23782 }
23783 #endif
23784@@ -900,6 +996,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
23785 return 1;
23786 }
23787
23788+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23789+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23790+{
23791+ pte_t *pte;
23792+ pmd_t *pmd;
23793+ spinlock_t *ptl;
23794+ unsigned char pte_mask;
23795+
23796+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
23797+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
23798+ return 0;
23799+
23800+ /* PaX: it's our fault, let's handle it if we can */
23801+
23802+ /* PaX: take a look at read faults before acquiring any locks */
23803+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23804+ /* instruction fetch attempt from a protected page in user mode */
23805+ up_read(&mm->mmap_sem);
23806+
23807+#ifdef CONFIG_PAX_EMUTRAMP
23808+ switch (pax_handle_fetch_fault(regs)) {
23809+ case 2:
23810+ return 1;
23811+ }
23812+#endif
23813+
23814+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23815+ do_group_exit(SIGKILL);
23816+ }
23817+
23818+ pmd = pax_get_pmd(mm, address);
23819+ if (unlikely(!pmd))
23820+ return 0;
23821+
23822+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23823+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23824+ pte_unmap_unlock(pte, ptl);
23825+ return 0;
23826+ }
23827+
23828+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23829+ /* write attempt to a protected page in user mode */
23830+ pte_unmap_unlock(pte, ptl);
23831+ return 0;
23832+ }
23833+
23834+#ifdef CONFIG_SMP
23835+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
23836+#else
23837+ if (likely(address > get_limit(regs->cs)))
23838+#endif
23839+ {
23840+ set_pte(pte, pte_mkread(*pte));
23841+ __flush_tlb_one(address);
23842+ pte_unmap_unlock(pte, ptl);
23843+ up_read(&mm->mmap_sem);
23844+ return 1;
23845+ }
23846+
23847+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
23848+
23849+ /*
23850+ * PaX: fill DTLB with user rights and retry
23851+ */
23852+ __asm__ __volatile__ (
23853+ "orb %2,(%1)\n"
23854+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
23855+/*
23856+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
23857+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
23858+ * page fault when examined during a TLB load attempt. this is true not only
23859+ * for PTEs holding a non-present entry but also present entries that will
23860+ * raise a page fault (such as those set up by PaX, or the copy-on-write
23861+ * mechanism). in effect it means that we do *not* need to flush the TLBs
23862+ * for our target pages since their PTEs are simply not in the TLBs at all.
23863+
23864+ * the best thing in omitting it is that we gain around 15-20% speed in the
23865+ * fast path of the page fault handler and can get rid of tracing since we
23866+ * can no longer flush unintended entries.
23867+ */
23868+ "invlpg (%0)\n"
23869+#endif
23870+ __copyuser_seg"testb $0,(%0)\n"
23871+ "xorb %3,(%1)\n"
23872+ :
23873+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
23874+ : "memory", "cc");
23875+ pte_unmap_unlock(pte, ptl);
23876+ up_read(&mm->mmap_sem);
23877+ return 1;
23878+}
23879+#endif
23880+
23881 /*
23882 * Handle a spurious fault caused by a stale TLB entry.
23883 *
23884@@ -972,6 +1161,9 @@ int show_unhandled_signals = 1;
23885 static inline int
23886 access_error(unsigned long error_code, struct vm_area_struct *vma)
23887 {
23888+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
23889+ return 1;
23890+
23891 if (error_code & PF_WRITE) {
23892 /* write, present and write, not present: */
23893 if (unlikely(!(vma->vm_flags & VM_WRITE)))
23894@@ -1005,18 +1197,33 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23895 {
23896 struct vm_area_struct *vma;
23897 struct task_struct *tsk;
23898- unsigned long address;
23899 struct mm_struct *mm;
23900 int fault;
23901 int write = error_code & PF_WRITE;
23902 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
23903 (write ? FAULT_FLAG_WRITE : 0);
23904
23905- tsk = current;
23906- mm = tsk->mm;
23907-
23908 /* Get the faulting address: */
23909- address = read_cr2();
23910+ unsigned long address = read_cr2();
23911+
23912+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23913+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
23914+ if (!search_exception_tables(regs->ip)) {
23915+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23916+ bad_area_nosemaphore(regs, error_code, address);
23917+ return;
23918+ }
23919+ if (address < PAX_USER_SHADOW_BASE) {
23920+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23921+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
23922+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
23923+ } else
23924+ address -= PAX_USER_SHADOW_BASE;
23925+ }
23926+#endif
23927+
23928+ tsk = current;
23929+ mm = tsk->mm;
23930
23931 /*
23932 * Detect and handle instructions that would cause a page fault for
23933@@ -1077,7 +1284,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23934 * User-mode registers count as a user access even for any
23935 * potential system fault or CPU buglet:
23936 */
23937- if (user_mode_vm(regs)) {
23938+ if (user_mode(regs)) {
23939 local_irq_enable();
23940 error_code |= PF_USER;
23941 } else {
23942@@ -1132,6 +1339,11 @@ retry:
23943 might_sleep();
23944 }
23945
23946+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23947+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
23948+ return;
23949+#endif
23950+
23951 vma = find_vma(mm, address);
23952 if (unlikely(!vma)) {
23953 bad_area(regs, error_code, address);
23954@@ -1143,18 +1355,24 @@ retry:
23955 bad_area(regs, error_code, address);
23956 return;
23957 }
23958- if (error_code & PF_USER) {
23959- /*
23960- * Accessing the stack below %sp is always a bug.
23961- * The large cushion allows instructions like enter
23962- * and pusha to work. ("enter $65535, $31" pushes
23963- * 32 pointers and then decrements %sp by 65535.)
23964- */
23965- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
23966- bad_area(regs, error_code, address);
23967- return;
23968- }
23969+ /*
23970+ * Accessing the stack below %sp is always a bug.
23971+ * The large cushion allows instructions like enter
23972+ * and pusha to work. ("enter $65535, $31" pushes
23973+ * 32 pointers and then decrements %sp by 65535.)
23974+ */
23975+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
23976+ bad_area(regs, error_code, address);
23977+ return;
23978 }
23979+
23980+#ifdef CONFIG_PAX_SEGMEXEC
23981+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
23982+ bad_area(regs, error_code, address);
23983+ return;
23984+ }
23985+#endif
23986+
23987 if (unlikely(expand_stack(vma, address))) {
23988 bad_area(regs, error_code, address);
23989 return;
23990@@ -1209,3 +1427,292 @@ good_area:
23991
23992 up_read(&mm->mmap_sem);
23993 }
23994+
23995+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23996+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
23997+{
23998+ struct mm_struct *mm = current->mm;
23999+ unsigned long ip = regs->ip;
24000+
24001+ if (v8086_mode(regs))
24002+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24003+
24004+#ifdef CONFIG_PAX_PAGEEXEC
24005+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24006+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24007+ return true;
24008+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24009+ return true;
24010+ return false;
24011+ }
24012+#endif
24013+
24014+#ifdef CONFIG_PAX_SEGMEXEC
24015+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24016+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24017+ return true;
24018+ return false;
24019+ }
24020+#endif
24021+
24022+ return false;
24023+}
24024+#endif
24025+
24026+#ifdef CONFIG_PAX_EMUTRAMP
24027+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24028+{
24029+ int err;
24030+
24031+ do { /* PaX: libffi trampoline emulation */
24032+ unsigned char mov, jmp;
24033+ unsigned int addr1, addr2;
24034+
24035+#ifdef CONFIG_X86_64
24036+ if ((regs->ip + 9) >> 32)
24037+ break;
24038+#endif
24039+
24040+ err = get_user(mov, (unsigned char __user *)regs->ip);
24041+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24042+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24043+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24044+
24045+ if (err)
24046+ break;
24047+
24048+ if (mov == 0xB8 && jmp == 0xE9) {
24049+ regs->ax = addr1;
24050+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24051+ return 2;
24052+ }
24053+ } while (0);
24054+
24055+ do { /* PaX: gcc trampoline emulation #1 */
24056+ unsigned char mov1, mov2;
24057+ unsigned short jmp;
24058+ unsigned int addr1, addr2;
24059+
24060+#ifdef CONFIG_X86_64
24061+ if ((regs->ip + 11) >> 32)
24062+ break;
24063+#endif
24064+
24065+ err = get_user(mov1, (unsigned char __user *)regs->ip);
24066+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24067+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24068+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24069+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24070+
24071+ if (err)
24072+ break;
24073+
24074+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24075+ regs->cx = addr1;
24076+ regs->ax = addr2;
24077+ regs->ip = addr2;
24078+ return 2;
24079+ }
24080+ } while (0);
24081+
24082+ do { /* PaX: gcc trampoline emulation #2 */
24083+ unsigned char mov, jmp;
24084+ unsigned int addr1, addr2;
24085+
24086+#ifdef CONFIG_X86_64
24087+ if ((regs->ip + 9) >> 32)
24088+ break;
24089+#endif
24090+
24091+ err = get_user(mov, (unsigned char __user *)regs->ip);
24092+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24093+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24094+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24095+
24096+ if (err)
24097+ break;
24098+
24099+ if (mov == 0xB9 && jmp == 0xE9) {
24100+ regs->cx = addr1;
24101+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24102+ return 2;
24103+ }
24104+ } while (0);
24105+
24106+ return 1; /* PaX in action */
24107+}
24108+
24109+#ifdef CONFIG_X86_64
24110+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24111+{
24112+ int err;
24113+
24114+ do { /* PaX: libffi trampoline emulation */
24115+ unsigned short mov1, mov2, jmp1;
24116+ unsigned char stcclc, jmp2;
24117+ unsigned long addr1, addr2;
24118+
24119+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24120+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24121+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24122+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24123+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24124+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24125+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24126+
24127+ if (err)
24128+ break;
24129+
24130+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24131+ regs->r11 = addr1;
24132+ regs->r10 = addr2;
24133+ if (stcclc == 0xF8)
24134+ regs->flags &= ~X86_EFLAGS_CF;
24135+ else
24136+ regs->flags |= X86_EFLAGS_CF;
24137+ regs->ip = addr1;
24138+ return 2;
24139+ }
24140+ } while (0);
24141+
24142+ do { /* PaX: gcc trampoline emulation #1 */
24143+ unsigned short mov1, mov2, jmp1;
24144+ unsigned char jmp2;
24145+ unsigned int addr1;
24146+ unsigned long addr2;
24147+
24148+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24149+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24150+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24151+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24152+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24153+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24154+
24155+ if (err)
24156+ break;
24157+
24158+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24159+ regs->r11 = addr1;
24160+ regs->r10 = addr2;
24161+ regs->ip = addr1;
24162+ return 2;
24163+ }
24164+ } while (0);
24165+
24166+ do { /* PaX: gcc trampoline emulation #2 */
24167+ unsigned short mov1, mov2, jmp1;
24168+ unsigned char jmp2;
24169+ unsigned long addr1, addr2;
24170+
24171+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24172+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24173+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24174+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24175+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24176+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24177+
24178+ if (err)
24179+ break;
24180+
24181+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24182+ regs->r11 = addr1;
24183+ regs->r10 = addr2;
24184+ regs->ip = addr1;
24185+ return 2;
24186+ }
24187+ } while (0);
24188+
24189+ return 1; /* PaX in action */
24190+}
24191+#endif
24192+
24193+/*
24194+ * PaX: decide what to do with offenders (regs->ip = fault address)
24195+ *
24196+ * returns 1 when task should be killed
24197+ * 2 when gcc trampoline was detected
24198+ */
24199+static int pax_handle_fetch_fault(struct pt_regs *regs)
24200+{
24201+ if (v8086_mode(regs))
24202+ return 1;
24203+
24204+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24205+ return 1;
24206+
24207+#ifdef CONFIG_X86_32
24208+ return pax_handle_fetch_fault_32(regs);
24209+#else
24210+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24211+ return pax_handle_fetch_fault_32(regs);
24212+ else
24213+ return pax_handle_fetch_fault_64(regs);
24214+#endif
24215+}
24216+#endif
24217+
24218+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24219+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24220+{
24221+ long i;
24222+
24223+ printk(KERN_ERR "PAX: bytes at PC: ");
24224+ for (i = 0; i < 20; i++) {
24225+ unsigned char c;
24226+ if (get_user(c, (unsigned char __force_user *)pc+i))
24227+ printk(KERN_CONT "?? ");
24228+ else
24229+ printk(KERN_CONT "%02x ", c);
24230+ }
24231+ printk("\n");
24232+
24233+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24234+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
24235+ unsigned long c;
24236+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
24237+#ifdef CONFIG_X86_32
24238+ printk(KERN_CONT "???????? ");
24239+#else
24240+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24241+ printk(KERN_CONT "???????? ???????? ");
24242+ else
24243+ printk(KERN_CONT "???????????????? ");
24244+#endif
24245+ } else {
24246+#ifdef CONFIG_X86_64
24247+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24248+ printk(KERN_CONT "%08x ", (unsigned int)c);
24249+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24250+ } else
24251+#endif
24252+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24253+ }
24254+ }
24255+ printk("\n");
24256+}
24257+#endif
24258+
24259+/**
24260+ * probe_kernel_write(): safely attempt to write to a location
24261+ * @dst: address to write to
24262+ * @src: pointer to the data that shall be written
24263+ * @size: size of the data chunk
24264+ *
24265+ * Safely write to address @dst from the buffer at @src. If a kernel fault
24266+ * happens, handle that and return -EFAULT.
24267+ */
24268+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24269+{
24270+ long ret;
24271+ mm_segment_t old_fs = get_fs();
24272+
24273+ set_fs(KERNEL_DS);
24274+ pagefault_disable();
24275+ pax_open_kernel();
24276+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24277+ pax_close_kernel();
24278+ pagefault_enable();
24279+ set_fs(old_fs);
24280+
24281+ return ret ? -EFAULT : 0;
24282+}
24283diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24284index dd74e46..7d26398 100644
24285--- a/arch/x86/mm/gup.c
24286+++ b/arch/x86/mm/gup.c
24287@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24288 addr = start;
24289 len = (unsigned long) nr_pages << PAGE_SHIFT;
24290 end = start + len;
24291- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24292+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24293 (void __user *)start, len)))
24294 return 0;
24295
24296diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24297index 6f31ee5..8ee4164 100644
24298--- a/arch/x86/mm/highmem_32.c
24299+++ b/arch/x86/mm/highmem_32.c
24300@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
24301 idx = type + KM_TYPE_NR*smp_processor_id();
24302 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24303 BUG_ON(!pte_none(*(kmap_pte-idx)));
24304+
24305+ pax_open_kernel();
24306 set_pte(kmap_pte-idx, mk_pte(page, prot));
24307+ pax_close_kernel();
24308+
24309 arch_flush_lazy_mmu_mode();
24310
24311 return (void *)vaddr;
24312diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24313index f6679a7..8f795a3 100644
24314--- a/arch/x86/mm/hugetlbpage.c
24315+++ b/arch/x86/mm/hugetlbpage.c
24316@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24317 struct hstate *h = hstate_file(file);
24318 struct mm_struct *mm = current->mm;
24319 struct vm_area_struct *vma;
24320- unsigned long start_addr;
24321+ unsigned long start_addr, pax_task_size = TASK_SIZE;
24322+
24323+#ifdef CONFIG_PAX_SEGMEXEC
24324+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24325+ pax_task_size = SEGMEXEC_TASK_SIZE;
24326+#endif
24327+
24328+ pax_task_size -= PAGE_SIZE;
24329
24330 if (len > mm->cached_hole_size) {
24331- start_addr = mm->free_area_cache;
24332+ start_addr = mm->free_area_cache;
24333 } else {
24334- start_addr = TASK_UNMAPPED_BASE;
24335- mm->cached_hole_size = 0;
24336+ start_addr = mm->mmap_base;
24337+ mm->cached_hole_size = 0;
24338 }
24339
24340 full_search:
24341@@ -280,26 +287,27 @@ full_search:
24342
24343 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24344 /* At this point: (!vma || addr < vma->vm_end). */
24345- if (TASK_SIZE - len < addr) {
24346+ if (pax_task_size - len < addr) {
24347 /*
24348 * Start a new search - just in case we missed
24349 * some holes.
24350 */
24351- if (start_addr != TASK_UNMAPPED_BASE) {
24352- start_addr = TASK_UNMAPPED_BASE;
24353+ if (start_addr != mm->mmap_base) {
24354+ start_addr = mm->mmap_base;
24355 mm->cached_hole_size = 0;
24356 goto full_search;
24357 }
24358 return -ENOMEM;
24359 }
24360- if (!vma || addr + len <= vma->vm_start) {
24361- mm->free_area_cache = addr + len;
24362- return addr;
24363- }
24364+ if (check_heap_stack_gap(vma, addr, len))
24365+ break;
24366 if (addr + mm->cached_hole_size < vma->vm_start)
24367 mm->cached_hole_size = vma->vm_start - addr;
24368 addr = ALIGN(vma->vm_end, huge_page_size(h));
24369 }
24370+
24371+ mm->free_area_cache = addr + len;
24372+ return addr;
24373 }
24374
24375 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24376@@ -310,9 +318,8 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24377 struct mm_struct *mm = current->mm;
24378 struct vm_area_struct *vma;
24379 unsigned long base = mm->mmap_base;
24380- unsigned long addr = addr0;
24381+ unsigned long addr;
24382 unsigned long largest_hole = mm->cached_hole_size;
24383- unsigned long start_addr;
24384
24385 /* don't allow allocations above current base */
24386 if (mm->free_area_cache > base)
24387@@ -322,16 +329,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24388 largest_hole = 0;
24389 mm->free_area_cache = base;
24390 }
24391-try_again:
24392- start_addr = mm->free_area_cache;
24393
24394 /* make sure it can fit in the remaining address space */
24395 if (mm->free_area_cache < len)
24396 goto fail;
24397
24398 /* either no address requested or can't fit in requested address hole */
24399- addr = (mm->free_area_cache - len) & huge_page_mask(h);
24400+ addr = mm->free_area_cache - len;
24401 do {
24402+ addr &= huge_page_mask(h);
24403 /*
24404 * Lookup failure means no vma is above this address,
24405 * i.e. return with success:
24406@@ -340,10 +346,10 @@ try_again:
24407 if (!vma)
24408 return addr;
24409
24410- if (addr + len <= vma->vm_start) {
24411+ if (check_heap_stack_gap(vma, addr, len)) {
24412 /* remember the address as a hint for next time */
24413- mm->cached_hole_size = largest_hole;
24414- return (mm->free_area_cache = addr);
24415+ mm->cached_hole_size = largest_hole;
24416+ return (mm->free_area_cache = addr);
24417 } else if (mm->free_area_cache == vma->vm_end) {
24418 /* pull free_area_cache down to the first hole */
24419 mm->free_area_cache = vma->vm_start;
24420@@ -352,29 +358,34 @@ try_again:
24421
24422 /* remember the largest hole we saw so far */
24423 if (addr + largest_hole < vma->vm_start)
24424- largest_hole = vma->vm_start - addr;
24425+ largest_hole = vma->vm_start - addr;
24426
24427 /* try just below the current vma->vm_start */
24428- addr = (vma->vm_start - len) & huge_page_mask(h);
24429- } while (len <= vma->vm_start);
24430+ addr = skip_heap_stack_gap(vma, len);
24431+ } while (!IS_ERR_VALUE(addr));
24432
24433 fail:
24434 /*
24435- * if hint left us with no space for the requested
24436- * mapping then try again:
24437- */
24438- if (start_addr != base) {
24439- mm->free_area_cache = base;
24440- largest_hole = 0;
24441- goto try_again;
24442- }
24443- /*
24444 * A failed mmap() very likely causes application failure,
24445 * so fall back to the bottom-up function here. This scenario
24446 * can happen with large stack limits and large mmap()
24447 * allocations.
24448 */
24449- mm->free_area_cache = TASK_UNMAPPED_BASE;
24450+
24451+#ifdef CONFIG_PAX_SEGMEXEC
24452+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24453+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24454+ else
24455+#endif
24456+
24457+ mm->mmap_base = TASK_UNMAPPED_BASE;
24458+
24459+#ifdef CONFIG_PAX_RANDMMAP
24460+ if (mm->pax_flags & MF_PAX_RANDMMAP)
24461+ mm->mmap_base += mm->delta_mmap;
24462+#endif
24463+
24464+ mm->free_area_cache = mm->mmap_base;
24465 mm->cached_hole_size = ~0UL;
24466 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24467 len, pgoff, flags);
24468@@ -382,6 +393,7 @@ fail:
24469 /*
24470 * Restore the topdown base:
24471 */
24472+ mm->mmap_base = base;
24473 mm->free_area_cache = base;
24474 mm->cached_hole_size = ~0UL;
24475
24476@@ -395,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24477 struct hstate *h = hstate_file(file);
24478 struct mm_struct *mm = current->mm;
24479 struct vm_area_struct *vma;
24480+ unsigned long pax_task_size = TASK_SIZE;
24481
24482 if (len & ~huge_page_mask(h))
24483 return -EINVAL;
24484- if (len > TASK_SIZE)
24485+
24486+#ifdef CONFIG_PAX_SEGMEXEC
24487+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24488+ pax_task_size = SEGMEXEC_TASK_SIZE;
24489+#endif
24490+
24491+ pax_task_size -= PAGE_SIZE;
24492+
24493+ if (len > pax_task_size)
24494 return -ENOMEM;
24495
24496 if (flags & MAP_FIXED) {
24497@@ -410,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24498 if (addr) {
24499 addr = ALIGN(addr, huge_page_size(h));
24500 vma = find_vma(mm, addr);
24501- if (TASK_SIZE - len >= addr &&
24502- (!vma || addr + len <= vma->vm_start))
24503+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24504 return addr;
24505 }
24506 if (mm->get_unmapped_area == arch_get_unmapped_area)
24507diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24508index 4f0cec7..00976ce 100644
24509--- a/arch/x86/mm/init.c
24510+++ b/arch/x86/mm/init.c
24511@@ -16,6 +16,8 @@
24512 #include <asm/tlb.h>
24513 #include <asm/proto.h>
24514 #include <asm/dma.h> /* for MAX_DMA_PFN */
24515+#include <asm/desc.h>
24516+#include <asm/bios_ebda.h>
24517
24518 unsigned long __initdata pgt_buf_start;
24519 unsigned long __meminitdata pgt_buf_end;
24520@@ -32,7 +34,7 @@ int direct_gbpages
24521 static void __init find_early_table_space(unsigned long end, int use_pse,
24522 int use_gbpages)
24523 {
24524- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
24525+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
24526 phys_addr_t base;
24527
24528 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
24529@@ -311,10 +313,37 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24530 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
24531 * mmio resources as well as potential bios/acpi data regions.
24532 */
24533+
24534+#ifdef CONFIG_GRKERNSEC_KMEM
24535+static unsigned int ebda_start __read_only;
24536+static unsigned int ebda_end __read_only;
24537+#endif
24538+
24539 int devmem_is_allowed(unsigned long pagenr)
24540 {
24541+#ifdef CONFIG_GRKERNSEC_KMEM
24542+ /* allow BDA */
24543+ if (!pagenr)
24544+ return 1;
24545+ /* allow EBDA */
24546+ if (pagenr >= ebda_start && pagenr < ebda_end)
24547+ return 1;
24548+#else
24549+ if (!pagenr)
24550+ return 1;
24551+#ifdef CONFIG_VM86
24552+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
24553+ return 1;
24554+#endif
24555+#endif
24556+
24557+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24558+ return 1;
24559+#ifdef CONFIG_GRKERNSEC_KMEM
24560+ /* throw out everything else below 1MB */
24561 if (pagenr <= 256)
24562- return 1;
24563+ return 0;
24564+#endif
24565 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24566 return 0;
24567 if (!page_is_ram(pagenr))
24568@@ -371,8 +400,116 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24569 #endif
24570 }
24571
24572+#ifdef CONFIG_GRKERNSEC_KMEM
24573+static inline void gr_init_ebda(void)
24574+{
24575+ unsigned int ebda_addr;
24576+ unsigned int ebda_size = 0;
24577+
24578+ ebda_addr = get_bios_ebda();
24579+ if (ebda_addr) {
24580+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
24581+ ebda_size <<= 10;
24582+ }
24583+ if (ebda_addr && ebda_size) {
24584+ ebda_start = ebda_addr >> PAGE_SHIFT;
24585+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
24586+ } else {
24587+ ebda_start = 0x9f000 >> PAGE_SHIFT;
24588+ ebda_end = 0xa0000 >> PAGE_SHIFT;
24589+ }
24590+}
24591+#else
24592+static inline void gr_init_ebda(void) { }
24593+#endif
24594+
24595 void free_initmem(void)
24596 {
24597+#ifdef CONFIG_PAX_KERNEXEC
24598+#ifdef CONFIG_X86_32
24599+ /* PaX: limit KERNEL_CS to actual size */
24600+ unsigned long addr, limit;
24601+ struct desc_struct d;
24602+ int cpu;
24603+#else
24604+ pgd_t *pgd;
24605+ pud_t *pud;
24606+ pmd_t *pmd;
24607+ unsigned long addr, end;
24608+#endif
24609+#endif
24610+
24611+ gr_init_ebda();
24612+
24613+#ifdef CONFIG_PAX_KERNEXEC
24614+#ifdef CONFIG_X86_32
24615+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24616+ limit = (limit - 1UL) >> PAGE_SHIFT;
24617+
24618+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24619+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
24620+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24621+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24622+ }
24623+
24624+ /* PaX: make KERNEL_CS read-only */
24625+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24626+ if (!paravirt_enabled())
24627+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24628+/*
24629+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24630+ pgd = pgd_offset_k(addr);
24631+ pud = pud_offset(pgd, addr);
24632+ pmd = pmd_offset(pud, addr);
24633+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24634+ }
24635+*/
24636+#ifdef CONFIG_X86_PAE
24637+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24638+/*
24639+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24640+ pgd = pgd_offset_k(addr);
24641+ pud = pud_offset(pgd, addr);
24642+ pmd = pmd_offset(pud, addr);
24643+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24644+ }
24645+*/
24646+#endif
24647+
24648+#ifdef CONFIG_MODULES
24649+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24650+#endif
24651+
24652+#else
24653+ /* PaX: make kernel code/rodata read-only, rest non-executable */
24654+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24655+ pgd = pgd_offset_k(addr);
24656+ pud = pud_offset(pgd, addr);
24657+ pmd = pmd_offset(pud, addr);
24658+ if (!pmd_present(*pmd))
24659+ continue;
24660+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24661+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24662+ else
24663+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24664+ }
24665+
24666+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24667+ end = addr + KERNEL_IMAGE_SIZE;
24668+ for (; addr < end; addr += PMD_SIZE) {
24669+ pgd = pgd_offset_k(addr);
24670+ pud = pud_offset(pgd, addr);
24671+ pmd = pmd_offset(pud, addr);
24672+ if (!pmd_present(*pmd))
24673+ continue;
24674+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24675+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24676+ }
24677+#endif
24678+
24679+ flush_tlb_all();
24680+#endif
24681+
24682 free_init_pages("unused kernel memory",
24683 (unsigned long)(&__init_begin),
24684 (unsigned long)(&__init_end));
24685diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24686index 575d86f..4987469 100644
24687--- a/arch/x86/mm/init_32.c
24688+++ b/arch/x86/mm/init_32.c
24689@@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
24690 }
24691
24692 /*
24693- * Creates a middle page table and puts a pointer to it in the
24694- * given global directory entry. This only returns the gd entry
24695- * in non-PAE compilation mode, since the middle layer is folded.
24696- */
24697-static pmd_t * __init one_md_table_init(pgd_t *pgd)
24698-{
24699- pud_t *pud;
24700- pmd_t *pmd_table;
24701-
24702-#ifdef CONFIG_X86_PAE
24703- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24704- if (after_bootmem)
24705- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24706- else
24707- pmd_table = (pmd_t *)alloc_low_page();
24708- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24709- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24710- pud = pud_offset(pgd, 0);
24711- BUG_ON(pmd_table != pmd_offset(pud, 0));
24712-
24713- return pmd_table;
24714- }
24715-#endif
24716- pud = pud_offset(pgd, 0);
24717- pmd_table = pmd_offset(pud, 0);
24718-
24719- return pmd_table;
24720-}
24721-
24722-/*
24723 * Create a page table and place a pointer to it in a middle page
24724 * directory entry:
24725 */
24726@@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24727 page_table = (pte_t *)alloc_low_page();
24728
24729 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24730+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24731+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24732+#else
24733 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24734+#endif
24735 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24736 }
24737
24738 return pte_offset_kernel(pmd, 0);
24739 }
24740
24741+static pmd_t * __init one_md_table_init(pgd_t *pgd)
24742+{
24743+ pud_t *pud;
24744+ pmd_t *pmd_table;
24745+
24746+ pud = pud_offset(pgd, 0);
24747+ pmd_table = pmd_offset(pud, 0);
24748+
24749+ return pmd_table;
24750+}
24751+
24752 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24753 {
24754 int pgd_idx = pgd_index(vaddr);
24755@@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24756 int pgd_idx, pmd_idx;
24757 unsigned long vaddr;
24758 pgd_t *pgd;
24759+ pud_t *pud;
24760 pmd_t *pmd;
24761 pte_t *pte = NULL;
24762
24763@@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24764 pgd = pgd_base + pgd_idx;
24765
24766 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24767- pmd = one_md_table_init(pgd);
24768- pmd = pmd + pmd_index(vaddr);
24769+ pud = pud_offset(pgd, vaddr);
24770+ pmd = pmd_offset(pud, vaddr);
24771+
24772+#ifdef CONFIG_X86_PAE
24773+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24774+#endif
24775+
24776 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24777 pmd++, pmd_idx++) {
24778 pte = page_table_kmap_check(one_page_table_init(pmd),
24779@@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24780 }
24781 }
24782
24783-static inline int is_kernel_text(unsigned long addr)
24784+static inline int is_kernel_text(unsigned long start, unsigned long end)
24785 {
24786- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
24787- return 1;
24788- return 0;
24789+ if ((start > ktla_ktva((unsigned long)_etext) ||
24790+ end <= ktla_ktva((unsigned long)_stext)) &&
24791+ (start > ktla_ktva((unsigned long)_einittext) ||
24792+ end <= ktla_ktva((unsigned long)_sinittext)) &&
24793+
24794+#ifdef CONFIG_ACPI_SLEEP
24795+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24796+#endif
24797+
24798+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24799+ return 0;
24800+ return 1;
24801 }
24802
24803 /*
24804@@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
24805 unsigned long last_map_addr = end;
24806 unsigned long start_pfn, end_pfn;
24807 pgd_t *pgd_base = swapper_pg_dir;
24808- int pgd_idx, pmd_idx, pte_ofs;
24809+ unsigned int pgd_idx, pmd_idx, pte_ofs;
24810 unsigned long pfn;
24811 pgd_t *pgd;
24812+ pud_t *pud;
24813 pmd_t *pmd;
24814 pte_t *pte;
24815 unsigned pages_2m, pages_4k;
24816@@ -280,8 +281,13 @@ repeat:
24817 pfn = start_pfn;
24818 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24819 pgd = pgd_base + pgd_idx;
24820- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
24821- pmd = one_md_table_init(pgd);
24822+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
24823+ pud = pud_offset(pgd, 0);
24824+ pmd = pmd_offset(pud, 0);
24825+
24826+#ifdef CONFIG_X86_PAE
24827+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24828+#endif
24829
24830 if (pfn >= end_pfn)
24831 continue;
24832@@ -293,14 +299,13 @@ repeat:
24833 #endif
24834 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
24835 pmd++, pmd_idx++) {
24836- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
24837+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
24838
24839 /*
24840 * Map with big pages if possible, otherwise
24841 * create normal page tables:
24842 */
24843 if (use_pse) {
24844- unsigned int addr2;
24845 pgprot_t prot = PAGE_KERNEL_LARGE;
24846 /*
24847 * first pass will use the same initial
24848@@ -310,11 +315,7 @@ repeat:
24849 __pgprot(PTE_IDENT_ATTR |
24850 _PAGE_PSE);
24851
24852- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
24853- PAGE_OFFSET + PAGE_SIZE-1;
24854-
24855- if (is_kernel_text(addr) ||
24856- is_kernel_text(addr2))
24857+ if (is_kernel_text(address, address + PMD_SIZE))
24858 prot = PAGE_KERNEL_LARGE_EXEC;
24859
24860 pages_2m++;
24861@@ -331,7 +332,7 @@ repeat:
24862 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24863 pte += pte_ofs;
24864 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
24865- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
24866+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
24867 pgprot_t prot = PAGE_KERNEL;
24868 /*
24869 * first pass will use the same initial
24870@@ -339,7 +340,7 @@ repeat:
24871 */
24872 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
24873
24874- if (is_kernel_text(addr))
24875+ if (is_kernel_text(address, address + PAGE_SIZE))
24876 prot = PAGE_KERNEL_EXEC;
24877
24878 pages_4k++;
24879@@ -465,7 +466,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
24880
24881 pud = pud_offset(pgd, va);
24882 pmd = pmd_offset(pud, va);
24883- if (!pmd_present(*pmd))
24884+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
24885 break;
24886
24887 pte = pte_offset_kernel(pmd, va);
24888@@ -517,12 +518,10 @@ void __init early_ioremap_page_table_range_init(void)
24889
24890 static void __init pagetable_init(void)
24891 {
24892- pgd_t *pgd_base = swapper_pg_dir;
24893-
24894- permanent_kmaps_init(pgd_base);
24895+ permanent_kmaps_init(swapper_pg_dir);
24896 }
24897
24898-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24899+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24900 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24901
24902 /* user-defined highmem size */
24903@@ -734,6 +733,12 @@ void __init mem_init(void)
24904
24905 pci_iommu_alloc();
24906
24907+#ifdef CONFIG_PAX_PER_CPU_PGD
24908+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24909+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24910+ KERNEL_PGD_PTRS);
24911+#endif
24912+
24913 #ifdef CONFIG_FLATMEM
24914 BUG_ON(!mem_map);
24915 #endif
24916@@ -760,7 +765,7 @@ void __init mem_init(void)
24917 reservedpages++;
24918
24919 codesize = (unsigned long) &_etext - (unsigned long) &_text;
24920- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
24921+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
24922 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
24923
24924 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
24925@@ -801,10 +806,10 @@ void __init mem_init(void)
24926 ((unsigned long)&__init_end -
24927 (unsigned long)&__init_begin) >> 10,
24928
24929- (unsigned long)&_etext, (unsigned long)&_edata,
24930- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
24931+ (unsigned long)&_sdata, (unsigned long)&_edata,
24932+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
24933
24934- (unsigned long)&_text, (unsigned long)&_etext,
24935+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
24936 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
24937
24938 /*
24939@@ -882,6 +887,7 @@ void set_kernel_text_rw(void)
24940 if (!kernel_set_to_readonly)
24941 return;
24942
24943+ start = ktla_ktva(start);
24944 pr_debug("Set kernel text: %lx - %lx for read write\n",
24945 start, start+size);
24946
24947@@ -896,6 +902,7 @@ void set_kernel_text_ro(void)
24948 if (!kernel_set_to_readonly)
24949 return;
24950
24951+ start = ktla_ktva(start);
24952 pr_debug("Set kernel text: %lx - %lx for read only\n",
24953 start, start+size);
24954
24955@@ -924,6 +931,7 @@ void mark_rodata_ro(void)
24956 unsigned long start = PFN_ALIGN(_text);
24957 unsigned long size = PFN_ALIGN(_etext) - start;
24958
24959+ start = ktla_ktva(start);
24960 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
24961 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
24962 size >> 10);
24963diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
24964index fc18be0..e539653 100644
24965--- a/arch/x86/mm/init_64.c
24966+++ b/arch/x86/mm/init_64.c
24967@@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
24968 * around without checking the pgd every time.
24969 */
24970
24971-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
24972+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
24973 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24974
24975 int force_personality32;
24976@@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24977
24978 for (address = start; address <= end; address += PGDIR_SIZE) {
24979 const pgd_t *pgd_ref = pgd_offset_k(address);
24980+
24981+#ifdef CONFIG_PAX_PER_CPU_PGD
24982+ unsigned long cpu;
24983+#else
24984 struct page *page;
24985+#endif
24986
24987 if (pgd_none(*pgd_ref))
24988 continue;
24989
24990 spin_lock(&pgd_lock);
24991+
24992+#ifdef CONFIG_PAX_PER_CPU_PGD
24993+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24994+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
24995+#else
24996 list_for_each_entry(page, &pgd_list, lru) {
24997 pgd_t *pgd;
24998 spinlock_t *pgt_lock;
24999@@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25000 /* the pgt_lock only for Xen */
25001 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
25002 spin_lock(pgt_lock);
25003+#endif
25004
25005 if (pgd_none(*pgd))
25006 set_pgd(pgd, *pgd_ref);
25007@@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25008 BUG_ON(pgd_page_vaddr(*pgd)
25009 != pgd_page_vaddr(*pgd_ref));
25010
25011+#ifndef CONFIG_PAX_PER_CPU_PGD
25012 spin_unlock(pgt_lock);
25013+#endif
25014+
25015 }
25016 spin_unlock(&pgd_lock);
25017 }
25018@@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
25019 {
25020 if (pgd_none(*pgd)) {
25021 pud_t *pud = (pud_t *)spp_getpage();
25022- pgd_populate(&init_mm, pgd, pud);
25023+ pgd_populate_kernel(&init_mm, pgd, pud);
25024 if (pud != pud_offset(pgd, 0))
25025 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
25026 pud, pud_offset(pgd, 0));
25027@@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
25028 {
25029 if (pud_none(*pud)) {
25030 pmd_t *pmd = (pmd_t *) spp_getpage();
25031- pud_populate(&init_mm, pud, pmd);
25032+ pud_populate_kernel(&init_mm, pud, pmd);
25033 if (pmd != pmd_offset(pud, 0))
25034 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
25035 pmd, pmd_offset(pud, 0));
25036@@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25037 pmd = fill_pmd(pud, vaddr);
25038 pte = fill_pte(pmd, vaddr);
25039
25040+ pax_open_kernel();
25041 set_pte(pte, new_pte);
25042+ pax_close_kernel();
25043
25044 /*
25045 * It's enough to flush this one mapping.
25046@@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25047 pgd = pgd_offset_k((unsigned long)__va(phys));
25048 if (pgd_none(*pgd)) {
25049 pud = (pud_t *) spp_getpage();
25050- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25051- _PAGE_USER));
25052+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25053 }
25054 pud = pud_offset(pgd, (unsigned long)__va(phys));
25055 if (pud_none(*pud)) {
25056 pmd = (pmd_t *) spp_getpage();
25057- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25058- _PAGE_USER));
25059+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25060 }
25061 pmd = pmd_offset(pud, phys);
25062 BUG_ON(!pmd_none(*pmd));
25063@@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
25064 if (pfn >= pgt_buf_top)
25065 panic("alloc_low_page: ran out of memory");
25066
25067- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25068+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25069 clear_page(adr);
25070 *phys = pfn * PAGE_SIZE;
25071 return adr;
25072@@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
25073
25074 phys = __pa(virt);
25075 left = phys & (PAGE_SIZE - 1);
25076- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25077+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25078 adr = (void *)(((unsigned long)adr) | left);
25079
25080 return adr;
25081@@ -545,7 +559,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
25082 unmap_low_page(pmd);
25083
25084 spin_lock(&init_mm.page_table_lock);
25085- pud_populate(&init_mm, pud, __va(pmd_phys));
25086+ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
25087 spin_unlock(&init_mm.page_table_lock);
25088 }
25089 __flush_tlb_all();
25090@@ -591,7 +605,7 @@ kernel_physical_mapping_init(unsigned long start,
25091 unmap_low_page(pud);
25092
25093 spin_lock(&init_mm.page_table_lock);
25094- pgd_populate(&init_mm, pgd, __va(pud_phys));
25095+ pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
25096 spin_unlock(&init_mm.page_table_lock);
25097 pgd_changed = true;
25098 }
25099@@ -683,6 +697,12 @@ void __init mem_init(void)
25100
25101 pci_iommu_alloc();
25102
25103+#ifdef CONFIG_PAX_PER_CPU_PGD
25104+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25105+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25106+ KERNEL_PGD_PTRS);
25107+#endif
25108+
25109 /* clear_bss() already clear the empty_zero_page */
25110
25111 reservedpages = 0;
25112@@ -843,8 +863,8 @@ int kern_addr_valid(unsigned long addr)
25113 static struct vm_area_struct gate_vma = {
25114 .vm_start = VSYSCALL_START,
25115 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25116- .vm_page_prot = PAGE_READONLY_EXEC,
25117- .vm_flags = VM_READ | VM_EXEC
25118+ .vm_page_prot = PAGE_READONLY,
25119+ .vm_flags = VM_READ
25120 };
25121
25122 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
25123@@ -878,7 +898,7 @@ int in_gate_area_no_mm(unsigned long addr)
25124
25125 const char *arch_vma_name(struct vm_area_struct *vma)
25126 {
25127- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25128+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25129 return "[vdso]";
25130 if (vma == &gate_vma)
25131 return "[vsyscall]";
25132diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25133index 7b179b4..6bd1777 100644
25134--- a/arch/x86/mm/iomap_32.c
25135+++ b/arch/x86/mm/iomap_32.c
25136@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
25137 type = kmap_atomic_idx_push();
25138 idx = type + KM_TYPE_NR * smp_processor_id();
25139 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25140+
25141+ pax_open_kernel();
25142 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25143+ pax_close_kernel();
25144+
25145 arch_flush_lazy_mmu_mode();
25146
25147 return (void *)vaddr;
25148diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25149index be1ef57..55f0160 100644
25150--- a/arch/x86/mm/ioremap.c
25151+++ b/arch/x86/mm/ioremap.c
25152@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25153 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
25154 int is_ram = page_is_ram(pfn);
25155
25156- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25157+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25158 return NULL;
25159 WARN_ON_ONCE(is_ram);
25160 }
25161@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25162
25163 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25164 if (page_is_ram(start >> PAGE_SHIFT))
25165+#ifdef CONFIG_HIGHMEM
25166+ if ((start >> PAGE_SHIFT) < max_low_pfn)
25167+#endif
25168 return __va(phys);
25169
25170 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
25171@@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
25172 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25173
25174 static __initdata int after_paging_init;
25175-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25176+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25177
25178 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25179 {
25180@@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
25181 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25182
25183 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25184- memset(bm_pte, 0, sizeof(bm_pte));
25185- pmd_populate_kernel(&init_mm, pmd, bm_pte);
25186+ pmd_populate_user(&init_mm, pmd, bm_pte);
25187
25188 /*
25189 * The boot-ioremap range spans multiple pmds, for which
25190diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25191index d87dd6d..bf3fa66 100644
25192--- a/arch/x86/mm/kmemcheck/kmemcheck.c
25193+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25194@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25195 * memory (e.g. tracked pages)? For now, we need this to avoid
25196 * invoking kmemcheck for PnP BIOS calls.
25197 */
25198- if (regs->flags & X86_VM_MASK)
25199+ if (v8086_mode(regs))
25200 return false;
25201- if (regs->cs != __KERNEL_CS)
25202+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25203 return false;
25204
25205 pte = kmemcheck_pte_lookup(address);
25206diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25207index 845df68..1d8d29f 100644
25208--- a/arch/x86/mm/mmap.c
25209+++ b/arch/x86/mm/mmap.c
25210@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
25211 * Leave an at least ~128 MB hole with possible stack randomization.
25212 */
25213 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25214-#define MAX_GAP (TASK_SIZE/6*5)
25215+#define MAX_GAP (pax_task_size/6*5)
25216
25217 static int mmap_is_legacy(void)
25218 {
25219@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
25220 return rnd << PAGE_SHIFT;
25221 }
25222
25223-static unsigned long mmap_base(void)
25224+static unsigned long mmap_base(struct mm_struct *mm)
25225 {
25226 unsigned long gap = rlimit(RLIMIT_STACK);
25227+ unsigned long pax_task_size = TASK_SIZE;
25228+
25229+#ifdef CONFIG_PAX_SEGMEXEC
25230+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25231+ pax_task_size = SEGMEXEC_TASK_SIZE;
25232+#endif
25233
25234 if (gap < MIN_GAP)
25235 gap = MIN_GAP;
25236 else if (gap > MAX_GAP)
25237 gap = MAX_GAP;
25238
25239- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25240+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25241 }
25242
25243 /*
25244 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25245 * does, but not when emulating X86_32
25246 */
25247-static unsigned long mmap_legacy_base(void)
25248+static unsigned long mmap_legacy_base(struct mm_struct *mm)
25249 {
25250- if (mmap_is_ia32())
25251+ if (mmap_is_ia32()) {
25252+
25253+#ifdef CONFIG_PAX_SEGMEXEC
25254+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25255+ return SEGMEXEC_TASK_UNMAPPED_BASE;
25256+ else
25257+#endif
25258+
25259 return TASK_UNMAPPED_BASE;
25260- else
25261+ } else
25262 return TASK_UNMAPPED_BASE + mmap_rnd();
25263 }
25264
25265@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
25266 void arch_pick_mmap_layout(struct mm_struct *mm)
25267 {
25268 if (mmap_is_legacy()) {
25269- mm->mmap_base = mmap_legacy_base();
25270+ mm->mmap_base = mmap_legacy_base(mm);
25271+
25272+#ifdef CONFIG_PAX_RANDMMAP
25273+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25274+ mm->mmap_base += mm->delta_mmap;
25275+#endif
25276+
25277 mm->get_unmapped_area = arch_get_unmapped_area;
25278 mm->unmap_area = arch_unmap_area;
25279 } else {
25280- mm->mmap_base = mmap_base();
25281+ mm->mmap_base = mmap_base(mm);
25282+
25283+#ifdef CONFIG_PAX_RANDMMAP
25284+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25285+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25286+#endif
25287+
25288 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25289 mm->unmap_area = arch_unmap_area_topdown;
25290 }
25291diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25292index dc0b727..dc9d71a 100644
25293--- a/arch/x86/mm/mmio-mod.c
25294+++ b/arch/x86/mm/mmio-mod.c
25295@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25296 break;
25297 default:
25298 {
25299- unsigned char *ip = (unsigned char *)instptr;
25300+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25301 my_trace->opcode = MMIO_UNKNOWN_OP;
25302 my_trace->width = 0;
25303 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25304@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25305 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25306 void __iomem *addr)
25307 {
25308- static atomic_t next_id;
25309+ static atomic_unchecked_t next_id;
25310 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25311 /* These are page-unaligned. */
25312 struct mmiotrace_map map = {
25313@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25314 .private = trace
25315 },
25316 .phys = offset,
25317- .id = atomic_inc_return(&next_id)
25318+ .id = atomic_inc_return_unchecked(&next_id)
25319 };
25320 map.map_id = trace->id;
25321
25322diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25323index b008656..773eac2 100644
25324--- a/arch/x86/mm/pageattr-test.c
25325+++ b/arch/x86/mm/pageattr-test.c
25326@@ -36,7 +36,7 @@ enum {
25327
25328 static int pte_testbit(pte_t pte)
25329 {
25330- return pte_flags(pte) & _PAGE_UNUSED1;
25331+ return pte_flags(pte) & _PAGE_CPA_TEST;
25332 }
25333
25334 struct split_state {
25335diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25336index e1ebde3..b1e1db38 100644
25337--- a/arch/x86/mm/pageattr.c
25338+++ b/arch/x86/mm/pageattr.c
25339@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25340 */
25341 #ifdef CONFIG_PCI_BIOS
25342 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25343- pgprot_val(forbidden) |= _PAGE_NX;
25344+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25345 #endif
25346
25347 /*
25348@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25349 * Does not cover __inittext since that is gone later on. On
25350 * 64bit we do not enforce !NX on the low mapping
25351 */
25352- if (within(address, (unsigned long)_text, (unsigned long)_etext))
25353- pgprot_val(forbidden) |= _PAGE_NX;
25354+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25355+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25356
25357+#ifdef CONFIG_DEBUG_RODATA
25358 /*
25359 * The .rodata section needs to be read-only. Using the pfn
25360 * catches all aliases.
25361@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25362 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25363 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25364 pgprot_val(forbidden) |= _PAGE_RW;
25365+#endif
25366
25367 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
25368 /*
25369@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25370 }
25371 #endif
25372
25373+#ifdef CONFIG_PAX_KERNEXEC
25374+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25375+ pgprot_val(forbidden) |= _PAGE_RW;
25376+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25377+ }
25378+#endif
25379+
25380 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25381
25382 return prot;
25383@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25384 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25385 {
25386 /* change init_mm */
25387+ pax_open_kernel();
25388 set_pte_atomic(kpte, pte);
25389+
25390 #ifdef CONFIG_X86_32
25391 if (!SHARED_KERNEL_PMD) {
25392+
25393+#ifdef CONFIG_PAX_PER_CPU_PGD
25394+ unsigned long cpu;
25395+#else
25396 struct page *page;
25397+#endif
25398
25399+#ifdef CONFIG_PAX_PER_CPU_PGD
25400+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25401+ pgd_t *pgd = get_cpu_pgd(cpu);
25402+#else
25403 list_for_each_entry(page, &pgd_list, lru) {
25404- pgd_t *pgd;
25405+ pgd_t *pgd = (pgd_t *)page_address(page);
25406+#endif
25407+
25408 pud_t *pud;
25409 pmd_t *pmd;
25410
25411- pgd = (pgd_t *)page_address(page) + pgd_index(address);
25412+ pgd += pgd_index(address);
25413 pud = pud_offset(pgd, address);
25414 pmd = pmd_offset(pud, address);
25415 set_pte_atomic((pte_t *)pmd, pte);
25416 }
25417 }
25418 #endif
25419+ pax_close_kernel();
25420 }
25421
25422 static int
25423diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25424index f6ff57b..481690f 100644
25425--- a/arch/x86/mm/pat.c
25426+++ b/arch/x86/mm/pat.c
25427@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
25428
25429 if (!entry) {
25430 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25431- current->comm, current->pid, start, end);
25432+ current->comm, task_pid_nr(current), start, end);
25433 return -EINVAL;
25434 }
25435
25436@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25437 while (cursor < to) {
25438 if (!devmem_is_allowed(pfn)) {
25439 printk(KERN_INFO
25440- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25441- current->comm, from, to);
25442+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25443+ current->comm, from, to, cursor);
25444 return 0;
25445 }
25446 cursor += PAGE_SIZE;
25447@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25448 printk(KERN_INFO
25449 "%s:%d ioremap_change_attr failed %s "
25450 "for %Lx-%Lx\n",
25451- current->comm, current->pid,
25452+ current->comm, task_pid_nr(current),
25453 cattr_name(flags),
25454 base, (unsigned long long)(base + size));
25455 return -EINVAL;
25456@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25457 if (want_flags != flags) {
25458 printk(KERN_WARNING
25459 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
25460- current->comm, current->pid,
25461+ current->comm, task_pid_nr(current),
25462 cattr_name(want_flags),
25463 (unsigned long long)paddr,
25464 (unsigned long long)(paddr + size),
25465@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25466 free_memtype(paddr, paddr + size);
25467 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25468 " for %Lx-%Lx, got %s\n",
25469- current->comm, current->pid,
25470+ current->comm, task_pid_nr(current),
25471 cattr_name(want_flags),
25472 (unsigned long long)paddr,
25473 (unsigned long long)(paddr + size),
25474diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25475index 9f0614d..92ae64a 100644
25476--- a/arch/x86/mm/pf_in.c
25477+++ b/arch/x86/mm/pf_in.c
25478@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25479 int i;
25480 enum reason_type rv = OTHERS;
25481
25482- p = (unsigned char *)ins_addr;
25483+ p = (unsigned char *)ktla_ktva(ins_addr);
25484 p += skip_prefix(p, &prf);
25485 p += get_opcode(p, &opcode);
25486
25487@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25488 struct prefix_bits prf;
25489 int i;
25490
25491- p = (unsigned char *)ins_addr;
25492+ p = (unsigned char *)ktla_ktva(ins_addr);
25493 p += skip_prefix(p, &prf);
25494 p += get_opcode(p, &opcode);
25495
25496@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25497 struct prefix_bits prf;
25498 int i;
25499
25500- p = (unsigned char *)ins_addr;
25501+ p = (unsigned char *)ktla_ktva(ins_addr);
25502 p += skip_prefix(p, &prf);
25503 p += get_opcode(p, &opcode);
25504
25505@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25506 struct prefix_bits prf;
25507 int i;
25508
25509- p = (unsigned char *)ins_addr;
25510+ p = (unsigned char *)ktla_ktva(ins_addr);
25511 p += skip_prefix(p, &prf);
25512 p += get_opcode(p, &opcode);
25513 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25514@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25515 struct prefix_bits prf;
25516 int i;
25517
25518- p = (unsigned char *)ins_addr;
25519+ p = (unsigned char *)ktla_ktva(ins_addr);
25520 p += skip_prefix(p, &prf);
25521 p += get_opcode(p, &opcode);
25522 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25523diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25524index 8573b83..4f3ed7e 100644
25525--- a/arch/x86/mm/pgtable.c
25526+++ b/arch/x86/mm/pgtable.c
25527@@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
25528 list_del(&page->lru);
25529 }
25530
25531-#define UNSHARED_PTRS_PER_PGD \
25532- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25533+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25534+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25535
25536+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
25537+{
25538+ unsigned int count = USER_PGD_PTRS;
25539
25540+ while (count--)
25541+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25542+}
25543+#endif
25544+
25545+#ifdef CONFIG_PAX_PER_CPU_PGD
25546+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
25547+{
25548+ unsigned int count = USER_PGD_PTRS;
25549+
25550+ while (count--) {
25551+ pgd_t pgd;
25552+
25553+#ifdef CONFIG_X86_64
25554+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
25555+#else
25556+ pgd = *src++;
25557+#endif
25558+
25559+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25560+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
25561+#endif
25562+
25563+ *dst++ = pgd;
25564+ }
25565+
25566+}
25567+#endif
25568+
25569+#ifdef CONFIG_X86_64
25570+#define pxd_t pud_t
25571+#define pyd_t pgd_t
25572+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25573+#define pxd_free(mm, pud) pud_free((mm), (pud))
25574+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25575+#define pyd_offset(mm, address) pgd_offset((mm), (address))
25576+#define PYD_SIZE PGDIR_SIZE
25577+#else
25578+#define pxd_t pmd_t
25579+#define pyd_t pud_t
25580+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25581+#define pxd_free(mm, pud) pmd_free((mm), (pud))
25582+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25583+#define pyd_offset(mm, address) pud_offset((mm), (address))
25584+#define PYD_SIZE PUD_SIZE
25585+#endif
25586+
25587+#ifdef CONFIG_PAX_PER_CPU_PGD
25588+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
25589+static inline void pgd_dtor(pgd_t *pgd) {}
25590+#else
25591 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
25592 {
25593 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
25594@@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
25595 pgd_list_del(pgd);
25596 spin_unlock(&pgd_lock);
25597 }
25598+#endif
25599
25600 /*
25601 * List of all pgd's needed for non-PAE so it can invalidate entries
25602@@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
25603 * -- wli
25604 */
25605
25606-#ifdef CONFIG_X86_PAE
25607+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25608 /*
25609 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25610 * updating the top-level pagetable entries to guarantee the
25611@@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
25612 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25613 * and initialize the kernel pmds here.
25614 */
25615-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25616+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25617
25618 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25619 {
25620@@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25621 */
25622 flush_tlb_mm(mm);
25623 }
25624+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25625+#define PREALLOCATED_PXDS USER_PGD_PTRS
25626 #else /* !CONFIG_X86_PAE */
25627
25628 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25629-#define PREALLOCATED_PMDS 0
25630+#define PREALLOCATED_PXDS 0
25631
25632 #endif /* CONFIG_X86_PAE */
25633
25634-static void free_pmds(pmd_t *pmds[])
25635+static void free_pxds(pxd_t *pxds[])
25636 {
25637 int i;
25638
25639- for(i = 0; i < PREALLOCATED_PMDS; i++)
25640- if (pmds[i])
25641- free_page((unsigned long)pmds[i]);
25642+ for(i = 0; i < PREALLOCATED_PXDS; i++)
25643+ if (pxds[i])
25644+ free_page((unsigned long)pxds[i]);
25645 }
25646
25647-static int preallocate_pmds(pmd_t *pmds[])
25648+static int preallocate_pxds(pxd_t *pxds[])
25649 {
25650 int i;
25651 bool failed = false;
25652
25653- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25654- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25655- if (pmd == NULL)
25656+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25657+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25658+ if (pxd == NULL)
25659 failed = true;
25660- pmds[i] = pmd;
25661+ pxds[i] = pxd;
25662 }
25663
25664 if (failed) {
25665- free_pmds(pmds);
25666+ free_pxds(pxds);
25667 return -ENOMEM;
25668 }
25669
25670@@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
25671 * preallocate which never got a corresponding vma will need to be
25672 * freed manually.
25673 */
25674-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25675+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25676 {
25677 int i;
25678
25679- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25680+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25681 pgd_t pgd = pgdp[i];
25682
25683 if (pgd_val(pgd) != 0) {
25684- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25685+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25686
25687- pgdp[i] = native_make_pgd(0);
25688+ set_pgd(pgdp + i, native_make_pgd(0));
25689
25690- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25691- pmd_free(mm, pmd);
25692+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25693+ pxd_free(mm, pxd);
25694 }
25695 }
25696 }
25697
25698-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25699+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25700 {
25701- pud_t *pud;
25702+ pyd_t *pyd;
25703 unsigned long addr;
25704 int i;
25705
25706- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25707+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25708 return;
25709
25710- pud = pud_offset(pgd, 0);
25711+#ifdef CONFIG_X86_64
25712+ pyd = pyd_offset(mm, 0L);
25713+#else
25714+ pyd = pyd_offset(pgd, 0L);
25715+#endif
25716
25717- for (addr = i = 0; i < PREALLOCATED_PMDS;
25718- i++, pud++, addr += PUD_SIZE) {
25719- pmd_t *pmd = pmds[i];
25720+ for (addr = i = 0; i < PREALLOCATED_PXDS;
25721+ i++, pyd++, addr += PYD_SIZE) {
25722+ pxd_t *pxd = pxds[i];
25723
25724 if (i >= KERNEL_PGD_BOUNDARY)
25725- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25726- sizeof(pmd_t) * PTRS_PER_PMD);
25727+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25728+ sizeof(pxd_t) * PTRS_PER_PMD);
25729
25730- pud_populate(mm, pud, pmd);
25731+ pyd_populate(mm, pyd, pxd);
25732 }
25733 }
25734
25735 pgd_t *pgd_alloc(struct mm_struct *mm)
25736 {
25737 pgd_t *pgd;
25738- pmd_t *pmds[PREALLOCATED_PMDS];
25739+ pxd_t *pxds[PREALLOCATED_PXDS];
25740
25741 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25742
25743@@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25744
25745 mm->pgd = pgd;
25746
25747- if (preallocate_pmds(pmds) != 0)
25748+ if (preallocate_pxds(pxds) != 0)
25749 goto out_free_pgd;
25750
25751 if (paravirt_pgd_alloc(mm) != 0)
25752- goto out_free_pmds;
25753+ goto out_free_pxds;
25754
25755 /*
25756 * Make sure that pre-populating the pmds is atomic with
25757@@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25758 spin_lock(&pgd_lock);
25759
25760 pgd_ctor(mm, pgd);
25761- pgd_prepopulate_pmd(mm, pgd, pmds);
25762+ pgd_prepopulate_pxd(mm, pgd, pxds);
25763
25764 spin_unlock(&pgd_lock);
25765
25766 return pgd;
25767
25768-out_free_pmds:
25769- free_pmds(pmds);
25770+out_free_pxds:
25771+ free_pxds(pxds);
25772 out_free_pgd:
25773 free_page((unsigned long)pgd);
25774 out:
25775@@ -295,7 +356,7 @@ out:
25776
25777 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25778 {
25779- pgd_mop_up_pmds(mm, pgd);
25780+ pgd_mop_up_pxds(mm, pgd);
25781 pgd_dtor(pgd);
25782 paravirt_pgd_free(mm, pgd);
25783 free_page((unsigned long)pgd);
25784diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25785index a69bcb8..19068ab 100644
25786--- a/arch/x86/mm/pgtable_32.c
25787+++ b/arch/x86/mm/pgtable_32.c
25788@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25789 return;
25790 }
25791 pte = pte_offset_kernel(pmd, vaddr);
25792+
25793+ pax_open_kernel();
25794 if (pte_val(pteval))
25795 set_pte_at(&init_mm, vaddr, pte, pteval);
25796 else
25797 pte_clear(&init_mm, vaddr, pte);
25798+ pax_close_kernel();
25799
25800 /*
25801 * It's enough to flush this one mapping.
25802diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25803index 410531d..0f16030 100644
25804--- a/arch/x86/mm/setup_nx.c
25805+++ b/arch/x86/mm/setup_nx.c
25806@@ -5,8 +5,10 @@
25807 #include <asm/pgtable.h>
25808 #include <asm/proto.h>
25809
25810+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25811 static int disable_nx __cpuinitdata;
25812
25813+#ifndef CONFIG_PAX_PAGEEXEC
25814 /*
25815 * noexec = on|off
25816 *
25817@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
25818 return 0;
25819 }
25820 early_param("noexec", noexec_setup);
25821+#endif
25822+
25823+#endif
25824
25825 void __cpuinit x86_configure_nx(void)
25826 {
25827+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25828 if (cpu_has_nx && !disable_nx)
25829 __supported_pte_mask |= _PAGE_NX;
25830 else
25831+#endif
25832 __supported_pte_mask &= ~_PAGE_NX;
25833 }
25834
25835diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25836index d6c0418..06a0ad5 100644
25837--- a/arch/x86/mm/tlb.c
25838+++ b/arch/x86/mm/tlb.c
25839@@ -65,7 +65,11 @@ void leave_mm(int cpu)
25840 BUG();
25841 cpumask_clear_cpu(cpu,
25842 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
25843+
25844+#ifndef CONFIG_PAX_PER_CPU_PGD
25845 load_cr3(swapper_pg_dir);
25846+#endif
25847+
25848 }
25849 EXPORT_SYMBOL_GPL(leave_mm);
25850
25851diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
25852index 877b9a1..a8ecf42 100644
25853--- a/arch/x86/net/bpf_jit.S
25854+++ b/arch/x86/net/bpf_jit.S
25855@@ -9,6 +9,7 @@
25856 */
25857 #include <linux/linkage.h>
25858 #include <asm/dwarf2.h>
25859+#include <asm/alternative-asm.h>
25860
25861 /*
25862 * Calling convention :
25863@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
25864 jle bpf_slow_path_word
25865 mov (SKBDATA,%rsi),%eax
25866 bswap %eax /* ntohl() */
25867+ pax_force_retaddr
25868 ret
25869
25870 sk_load_half:
25871@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
25872 jle bpf_slow_path_half
25873 movzwl (SKBDATA,%rsi),%eax
25874 rol $8,%ax # ntohs()
25875+ pax_force_retaddr
25876 ret
25877
25878 sk_load_byte:
25879@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
25880 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
25881 jle bpf_slow_path_byte
25882 movzbl (SKBDATA,%rsi),%eax
25883+ pax_force_retaddr
25884 ret
25885
25886 /**
25887@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
25888 movzbl (SKBDATA,%rsi),%ebx
25889 and $15,%bl
25890 shl $2,%bl
25891+ pax_force_retaddr
25892 ret
25893
25894 /* rsi contains offset and can be scratched */
25895@@ -109,6 +114,7 @@ bpf_slow_path_word:
25896 js bpf_error
25897 mov -12(%rbp),%eax
25898 bswap %eax
25899+ pax_force_retaddr
25900 ret
25901
25902 bpf_slow_path_half:
25903@@ -117,12 +123,14 @@ bpf_slow_path_half:
25904 mov -12(%rbp),%ax
25905 rol $8,%ax
25906 movzwl %ax,%eax
25907+ pax_force_retaddr
25908 ret
25909
25910 bpf_slow_path_byte:
25911 bpf_slow_path_common(1)
25912 js bpf_error
25913 movzbl -12(%rbp),%eax
25914+ pax_force_retaddr
25915 ret
25916
25917 bpf_slow_path_byte_msh:
25918@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
25919 and $15,%al
25920 shl $2,%al
25921 xchg %eax,%ebx
25922+ pax_force_retaddr
25923 ret
25924
25925 #define sk_negative_common(SIZE) \
25926@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
25927 sk_negative_common(4)
25928 mov (%rax), %eax
25929 bswap %eax
25930+ pax_force_retaddr
25931 ret
25932
25933 bpf_slow_path_half_neg:
25934@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
25935 mov (%rax),%ax
25936 rol $8,%ax
25937 movzwl %ax,%eax
25938+ pax_force_retaddr
25939 ret
25940
25941 bpf_slow_path_byte_neg:
25942@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
25943 .globl sk_load_byte_negative_offset
25944 sk_negative_common(1)
25945 movzbl (%rax), %eax
25946+ pax_force_retaddr
25947 ret
25948
25949 bpf_slow_path_byte_msh_neg:
25950@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
25951 and $15,%al
25952 shl $2,%al
25953 xchg %eax,%ebx
25954+ pax_force_retaddr
25955 ret
25956
25957 bpf_error:
25958@@ -197,4 +210,5 @@ bpf_error:
25959 xor %eax,%eax
25960 mov -8(%rbp),%rbx
25961 leaveq
25962+ pax_force_retaddr
25963 ret
25964diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
25965index 0597f95..a12c36e 100644
25966--- a/arch/x86/net/bpf_jit_comp.c
25967+++ b/arch/x86/net/bpf_jit_comp.c
25968@@ -120,6 +120,11 @@ static inline void bpf_flush_icache(void *start, void *end)
25969 set_fs(old_fs);
25970 }
25971
25972+struct bpf_jit_work {
25973+ struct work_struct work;
25974+ void *image;
25975+};
25976+
25977 #define CHOOSE_LOAD_FUNC(K, func) \
25978 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
25979
25980@@ -146,6 +151,10 @@ void bpf_jit_compile(struct sk_filter *fp)
25981 if (addrs == NULL)
25982 return;
25983
25984+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
25985+ if (!fp->work)
25986+ goto out;
25987+
25988 /* Before first pass, make a rough estimation of addrs[]
25989 * each bpf instruction is translated to less than 64 bytes
25990 */
25991@@ -589,17 +598,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25992 break;
25993 default:
25994 /* hmm, too complex filter, give up with jit compiler */
25995- goto out;
25996+ goto error;
25997 }
25998 ilen = prog - temp;
25999 if (image) {
26000 if (unlikely(proglen + ilen > oldproglen)) {
26001 pr_err("bpb_jit_compile fatal error\n");
26002- kfree(addrs);
26003- module_free(NULL, image);
26004- return;
26005+ module_free_exec(NULL, image);
26006+ goto error;
26007 }
26008+ pax_open_kernel();
26009 memcpy(image + proglen, temp, ilen);
26010+ pax_close_kernel();
26011 }
26012 proglen += ilen;
26013 addrs[i] = proglen;
26014@@ -620,11 +630,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26015 break;
26016 }
26017 if (proglen == oldproglen) {
26018- image = module_alloc(max_t(unsigned int,
26019- proglen,
26020- sizeof(struct work_struct)));
26021+ image = module_alloc_exec(proglen);
26022 if (!image)
26023- goto out;
26024+ goto error;
26025 }
26026 oldproglen = proglen;
26027 }
26028@@ -640,7 +648,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26029 bpf_flush_icache(image, image + proglen);
26030
26031 fp->bpf_func = (void *)image;
26032- }
26033+ } else
26034+error:
26035+ kfree(fp->work);
26036+
26037 out:
26038 kfree(addrs);
26039 return;
26040@@ -648,18 +659,20 @@ out:
26041
26042 static void jit_free_defer(struct work_struct *arg)
26043 {
26044- module_free(NULL, arg);
26045+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
26046+ kfree(arg);
26047 }
26048
26049 /* run from softirq, we must use a work_struct to call
26050- * module_free() from process context
26051+ * module_free_exec() from process context
26052 */
26053 void bpf_jit_free(struct sk_filter *fp)
26054 {
26055 if (fp->bpf_func != sk_run_filter) {
26056- struct work_struct *work = (struct work_struct *)fp->bpf_func;
26057+ struct work_struct *work = &fp->work->work;
26058
26059 INIT_WORK(work, jit_free_defer);
26060+ fp->work->image = fp->bpf_func;
26061 schedule_work(work);
26062 }
26063 }
26064diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26065index d6aa6e8..266395a 100644
26066--- a/arch/x86/oprofile/backtrace.c
26067+++ b/arch/x86/oprofile/backtrace.c
26068@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
26069 struct stack_frame_ia32 *fp;
26070 unsigned long bytes;
26071
26072- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26073+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26074 if (bytes != sizeof(bufhead))
26075 return NULL;
26076
26077- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
26078+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
26079
26080 oprofile_add_trace(bufhead[0].return_address);
26081
26082@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
26083 struct stack_frame bufhead[2];
26084 unsigned long bytes;
26085
26086- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26087+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26088 if (bytes != sizeof(bufhead))
26089 return NULL;
26090
26091@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26092 {
26093 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
26094
26095- if (!user_mode_vm(regs)) {
26096+ if (!user_mode(regs)) {
26097 unsigned long stack = kernel_stack_pointer(regs);
26098 if (depth)
26099 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26100diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
26101index 140942f..8a5cc55 100644
26102--- a/arch/x86/pci/mrst.c
26103+++ b/arch/x86/pci/mrst.c
26104@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
26105 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
26106 pci_mmcfg_late_init();
26107 pcibios_enable_irq = mrst_pci_irq_enable;
26108- pci_root_ops = pci_mrst_ops;
26109+ pax_open_kernel();
26110+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
26111+ pax_close_kernel();
26112 pci_soc_mode = 1;
26113 /* Continue with standard init */
26114 return 1;
26115diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26116index da8fe05..7ee6704 100644
26117--- a/arch/x86/pci/pcbios.c
26118+++ b/arch/x86/pci/pcbios.c
26119@@ -79,50 +79,93 @@ union bios32 {
26120 static struct {
26121 unsigned long address;
26122 unsigned short segment;
26123-} bios32_indirect = { 0, __KERNEL_CS };
26124+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26125
26126 /*
26127 * Returns the entry point for the given service, NULL on error
26128 */
26129
26130-static unsigned long bios32_service(unsigned long service)
26131+static unsigned long __devinit bios32_service(unsigned long service)
26132 {
26133 unsigned char return_code; /* %al */
26134 unsigned long address; /* %ebx */
26135 unsigned long length; /* %ecx */
26136 unsigned long entry; /* %edx */
26137 unsigned long flags;
26138+ struct desc_struct d, *gdt;
26139
26140 local_irq_save(flags);
26141- __asm__("lcall *(%%edi); cld"
26142+
26143+ gdt = get_cpu_gdt_table(smp_processor_id());
26144+
26145+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26146+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26147+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26148+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26149+
26150+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26151 : "=a" (return_code),
26152 "=b" (address),
26153 "=c" (length),
26154 "=d" (entry)
26155 : "0" (service),
26156 "1" (0),
26157- "D" (&bios32_indirect));
26158+ "D" (&bios32_indirect),
26159+ "r"(__PCIBIOS_DS)
26160+ : "memory");
26161+
26162+ pax_open_kernel();
26163+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26164+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26165+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26166+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26167+ pax_close_kernel();
26168+
26169 local_irq_restore(flags);
26170
26171 switch (return_code) {
26172- case 0:
26173- return address + entry;
26174- case 0x80: /* Not present */
26175- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26176- return 0;
26177- default: /* Shouldn't happen */
26178- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26179- service, return_code);
26180+ case 0: {
26181+ int cpu;
26182+ unsigned char flags;
26183+
26184+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26185+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26186+ printk(KERN_WARNING "bios32_service: not valid\n");
26187 return 0;
26188+ }
26189+ address = address + PAGE_OFFSET;
26190+ length += 16UL; /* some BIOSs underreport this... */
26191+ flags = 4;
26192+ if (length >= 64*1024*1024) {
26193+ length >>= PAGE_SHIFT;
26194+ flags |= 8;
26195+ }
26196+
26197+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26198+ gdt = get_cpu_gdt_table(cpu);
26199+ pack_descriptor(&d, address, length, 0x9b, flags);
26200+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26201+ pack_descriptor(&d, address, length, 0x93, flags);
26202+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26203+ }
26204+ return entry;
26205+ }
26206+ case 0x80: /* Not present */
26207+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26208+ return 0;
26209+ default: /* Shouldn't happen */
26210+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26211+ service, return_code);
26212+ return 0;
26213 }
26214 }
26215
26216 static struct {
26217 unsigned long address;
26218 unsigned short segment;
26219-} pci_indirect = { 0, __KERNEL_CS };
26220+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26221
26222-static int pci_bios_present;
26223+static int pci_bios_present __read_only;
26224
26225 static int __devinit check_pcibios(void)
26226 {
26227@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
26228 unsigned long flags, pcibios_entry;
26229
26230 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26231- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26232+ pci_indirect.address = pcibios_entry;
26233
26234 local_irq_save(flags);
26235- __asm__(
26236- "lcall *(%%edi); cld\n\t"
26237+ __asm__("movw %w6, %%ds\n\t"
26238+ "lcall *%%ss:(%%edi); cld\n\t"
26239+ "push %%ss\n\t"
26240+ "pop %%ds\n\t"
26241 "jc 1f\n\t"
26242 "xor %%ah, %%ah\n"
26243 "1:"
26244@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
26245 "=b" (ebx),
26246 "=c" (ecx)
26247 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26248- "D" (&pci_indirect)
26249+ "D" (&pci_indirect),
26250+ "r" (__PCIBIOS_DS)
26251 : "memory");
26252 local_irq_restore(flags);
26253
26254@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26255
26256 switch (len) {
26257 case 1:
26258- __asm__("lcall *(%%esi); cld\n\t"
26259+ __asm__("movw %w6, %%ds\n\t"
26260+ "lcall *%%ss:(%%esi); cld\n\t"
26261+ "push %%ss\n\t"
26262+ "pop %%ds\n\t"
26263 "jc 1f\n\t"
26264 "xor %%ah, %%ah\n"
26265 "1:"
26266@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26267 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26268 "b" (bx),
26269 "D" ((long)reg),
26270- "S" (&pci_indirect));
26271+ "S" (&pci_indirect),
26272+ "r" (__PCIBIOS_DS));
26273 /*
26274 * Zero-extend the result beyond 8 bits, do not trust the
26275 * BIOS having done it:
26276@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26277 *value &= 0xff;
26278 break;
26279 case 2:
26280- __asm__("lcall *(%%esi); cld\n\t"
26281+ __asm__("movw %w6, %%ds\n\t"
26282+ "lcall *%%ss:(%%esi); cld\n\t"
26283+ "push %%ss\n\t"
26284+ "pop %%ds\n\t"
26285 "jc 1f\n\t"
26286 "xor %%ah, %%ah\n"
26287 "1:"
26288@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26289 : "1" (PCIBIOS_READ_CONFIG_WORD),
26290 "b" (bx),
26291 "D" ((long)reg),
26292- "S" (&pci_indirect));
26293+ "S" (&pci_indirect),
26294+ "r" (__PCIBIOS_DS));
26295 /*
26296 * Zero-extend the result beyond 16 bits, do not trust the
26297 * BIOS having done it:
26298@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26299 *value &= 0xffff;
26300 break;
26301 case 4:
26302- __asm__("lcall *(%%esi); cld\n\t"
26303+ __asm__("movw %w6, %%ds\n\t"
26304+ "lcall *%%ss:(%%esi); cld\n\t"
26305+ "push %%ss\n\t"
26306+ "pop %%ds\n\t"
26307 "jc 1f\n\t"
26308 "xor %%ah, %%ah\n"
26309 "1:"
26310@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26311 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26312 "b" (bx),
26313 "D" ((long)reg),
26314- "S" (&pci_indirect));
26315+ "S" (&pci_indirect),
26316+ "r" (__PCIBIOS_DS));
26317 break;
26318 }
26319
26320@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26321
26322 switch (len) {
26323 case 1:
26324- __asm__("lcall *(%%esi); cld\n\t"
26325+ __asm__("movw %w6, %%ds\n\t"
26326+ "lcall *%%ss:(%%esi); cld\n\t"
26327+ "push %%ss\n\t"
26328+ "pop %%ds\n\t"
26329 "jc 1f\n\t"
26330 "xor %%ah, %%ah\n"
26331 "1:"
26332@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26333 "c" (value),
26334 "b" (bx),
26335 "D" ((long)reg),
26336- "S" (&pci_indirect));
26337+ "S" (&pci_indirect),
26338+ "r" (__PCIBIOS_DS));
26339 break;
26340 case 2:
26341- __asm__("lcall *(%%esi); cld\n\t"
26342+ __asm__("movw %w6, %%ds\n\t"
26343+ "lcall *%%ss:(%%esi); cld\n\t"
26344+ "push %%ss\n\t"
26345+ "pop %%ds\n\t"
26346 "jc 1f\n\t"
26347 "xor %%ah, %%ah\n"
26348 "1:"
26349@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26350 "c" (value),
26351 "b" (bx),
26352 "D" ((long)reg),
26353- "S" (&pci_indirect));
26354+ "S" (&pci_indirect),
26355+ "r" (__PCIBIOS_DS));
26356 break;
26357 case 4:
26358- __asm__("lcall *(%%esi); cld\n\t"
26359+ __asm__("movw %w6, %%ds\n\t"
26360+ "lcall *%%ss:(%%esi); cld\n\t"
26361+ "push %%ss\n\t"
26362+ "pop %%ds\n\t"
26363 "jc 1f\n\t"
26364 "xor %%ah, %%ah\n"
26365 "1:"
26366@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26367 "c" (value),
26368 "b" (bx),
26369 "D" ((long)reg),
26370- "S" (&pci_indirect));
26371+ "S" (&pci_indirect),
26372+ "r" (__PCIBIOS_DS));
26373 break;
26374 }
26375
26376@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26377
26378 DBG("PCI: Fetching IRQ routing table... ");
26379 __asm__("push %%es\n\t"
26380+ "movw %w8, %%ds\n\t"
26381 "push %%ds\n\t"
26382 "pop %%es\n\t"
26383- "lcall *(%%esi); cld\n\t"
26384+ "lcall *%%ss:(%%esi); cld\n\t"
26385 "pop %%es\n\t"
26386+ "push %%ss\n\t"
26387+ "pop %%ds\n"
26388 "jc 1f\n\t"
26389 "xor %%ah, %%ah\n"
26390 "1:"
26391@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26392 "1" (0),
26393 "D" ((long) &opt),
26394 "S" (&pci_indirect),
26395- "m" (opt)
26396+ "m" (opt),
26397+ "r" (__PCIBIOS_DS)
26398 : "memory");
26399 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26400 if (ret & 0xff00)
26401@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26402 {
26403 int ret;
26404
26405- __asm__("lcall *(%%esi); cld\n\t"
26406+ __asm__("movw %w5, %%ds\n\t"
26407+ "lcall *%%ss:(%%esi); cld\n\t"
26408+ "push %%ss\n\t"
26409+ "pop %%ds\n"
26410 "jc 1f\n\t"
26411 "xor %%ah, %%ah\n"
26412 "1:"
26413@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26414 : "0" (PCIBIOS_SET_PCI_HW_INT),
26415 "b" ((dev->bus->number << 8) | dev->devfn),
26416 "c" ((irq << 8) | (pin + 10)),
26417- "S" (&pci_indirect));
26418+ "S" (&pci_indirect),
26419+ "r" (__PCIBIOS_DS));
26420 return !(ret & 0xff00);
26421 }
26422 EXPORT_SYMBOL(pcibios_set_irq_routing);
26423diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
26424index 40e4469..1ab536e 100644
26425--- a/arch/x86/platform/efi/efi_32.c
26426+++ b/arch/x86/platform/efi/efi_32.c
26427@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
26428 {
26429 struct desc_ptr gdt_descr;
26430
26431+#ifdef CONFIG_PAX_KERNEXEC
26432+ struct desc_struct d;
26433+#endif
26434+
26435 local_irq_save(efi_rt_eflags);
26436
26437 load_cr3(initial_page_table);
26438 __flush_tlb_all();
26439
26440+#ifdef CONFIG_PAX_KERNEXEC
26441+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
26442+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26443+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
26444+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26445+#endif
26446+
26447 gdt_descr.address = __pa(get_cpu_gdt_table(0));
26448 gdt_descr.size = GDT_SIZE - 1;
26449 load_gdt(&gdt_descr);
26450@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
26451 {
26452 struct desc_ptr gdt_descr;
26453
26454+#ifdef CONFIG_PAX_KERNEXEC
26455+ struct desc_struct d;
26456+
26457+ memset(&d, 0, sizeof d);
26458+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26459+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26460+#endif
26461+
26462 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
26463 gdt_descr.size = GDT_SIZE - 1;
26464 load_gdt(&gdt_descr);
26465diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
26466index fbe66e6..c5c0dd2 100644
26467--- a/arch/x86/platform/efi/efi_stub_32.S
26468+++ b/arch/x86/platform/efi/efi_stub_32.S
26469@@ -6,7 +6,9 @@
26470 */
26471
26472 #include <linux/linkage.h>
26473+#include <linux/init.h>
26474 #include <asm/page_types.h>
26475+#include <asm/segment.h>
26476
26477 /*
26478 * efi_call_phys(void *, ...) is a function with variable parameters.
26479@@ -20,7 +22,7 @@
26480 * service functions will comply with gcc calling convention, too.
26481 */
26482
26483-.text
26484+__INIT
26485 ENTRY(efi_call_phys)
26486 /*
26487 * 0. The function can only be called in Linux kernel. So CS has been
26488@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
26489 * The mapping of lower virtual memory has been created in prelog and
26490 * epilog.
26491 */
26492- movl $1f, %edx
26493- subl $__PAGE_OFFSET, %edx
26494- jmp *%edx
26495+ movl $(__KERNEXEC_EFI_DS), %edx
26496+ mov %edx, %ds
26497+ mov %edx, %es
26498+ mov %edx, %ss
26499+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
26500 1:
26501
26502 /*
26503@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
26504 * parameter 2, ..., param n. To make things easy, we save the return
26505 * address of efi_call_phys in a global variable.
26506 */
26507- popl %edx
26508- movl %edx, saved_return_addr
26509- /* get the function pointer into ECX*/
26510- popl %ecx
26511- movl %ecx, efi_rt_function_ptr
26512- movl $2f, %edx
26513- subl $__PAGE_OFFSET, %edx
26514- pushl %edx
26515+ popl (saved_return_addr)
26516+ popl (efi_rt_function_ptr)
26517
26518 /*
26519 * 3. Clear PG bit in %CR0.
26520@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
26521 /*
26522 * 5. Call the physical function.
26523 */
26524- jmp *%ecx
26525+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
26526
26527-2:
26528 /*
26529 * 6. After EFI runtime service returns, control will return to
26530 * following instruction. We'd better readjust stack pointer first.
26531@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
26532 movl %cr0, %edx
26533 orl $0x80000000, %edx
26534 movl %edx, %cr0
26535- jmp 1f
26536-1:
26537+
26538 /*
26539 * 8. Now restore the virtual mode from flat mode by
26540 * adding EIP with PAGE_OFFSET.
26541 */
26542- movl $1f, %edx
26543- jmp *%edx
26544+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
26545 1:
26546+ movl $(__KERNEL_DS), %edx
26547+ mov %edx, %ds
26548+ mov %edx, %es
26549+ mov %edx, %ss
26550
26551 /*
26552 * 9. Balance the stack. And because EAX contain the return value,
26553 * we'd better not clobber it.
26554 */
26555- leal efi_rt_function_ptr, %edx
26556- movl (%edx), %ecx
26557- pushl %ecx
26558+ pushl (efi_rt_function_ptr)
26559
26560 /*
26561- * 10. Push the saved return address onto the stack and return.
26562+ * 10. Return to the saved return address.
26563 */
26564- leal saved_return_addr, %edx
26565- movl (%edx), %ecx
26566- pushl %ecx
26567- ret
26568+ jmpl *(saved_return_addr)
26569 ENDPROC(efi_call_phys)
26570 .previous
26571
26572-.data
26573+__INITDATA
26574 saved_return_addr:
26575 .long 0
26576 efi_rt_function_ptr:
26577diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
26578index 4c07cca..2c8427d 100644
26579--- a/arch/x86/platform/efi/efi_stub_64.S
26580+++ b/arch/x86/platform/efi/efi_stub_64.S
26581@@ -7,6 +7,7 @@
26582 */
26583
26584 #include <linux/linkage.h>
26585+#include <asm/alternative-asm.h>
26586
26587 #define SAVE_XMM \
26588 mov %rsp, %rax; \
26589@@ -40,6 +41,7 @@ ENTRY(efi_call0)
26590 call *%rdi
26591 addq $32, %rsp
26592 RESTORE_XMM
26593+ pax_force_retaddr 0, 1
26594 ret
26595 ENDPROC(efi_call0)
26596
26597@@ -50,6 +52,7 @@ ENTRY(efi_call1)
26598 call *%rdi
26599 addq $32, %rsp
26600 RESTORE_XMM
26601+ pax_force_retaddr 0, 1
26602 ret
26603 ENDPROC(efi_call1)
26604
26605@@ -60,6 +63,7 @@ ENTRY(efi_call2)
26606 call *%rdi
26607 addq $32, %rsp
26608 RESTORE_XMM
26609+ pax_force_retaddr 0, 1
26610 ret
26611 ENDPROC(efi_call2)
26612
26613@@ -71,6 +75,7 @@ ENTRY(efi_call3)
26614 call *%rdi
26615 addq $32, %rsp
26616 RESTORE_XMM
26617+ pax_force_retaddr 0, 1
26618 ret
26619 ENDPROC(efi_call3)
26620
26621@@ -83,6 +88,7 @@ ENTRY(efi_call4)
26622 call *%rdi
26623 addq $32, %rsp
26624 RESTORE_XMM
26625+ pax_force_retaddr 0, 1
26626 ret
26627 ENDPROC(efi_call4)
26628
26629@@ -96,6 +102,7 @@ ENTRY(efi_call5)
26630 call *%rdi
26631 addq $48, %rsp
26632 RESTORE_XMM
26633+ pax_force_retaddr 0, 1
26634 ret
26635 ENDPROC(efi_call5)
26636
26637@@ -112,5 +119,6 @@ ENTRY(efi_call6)
26638 call *%rdi
26639 addq $48, %rsp
26640 RESTORE_XMM
26641+ pax_force_retaddr 0, 1
26642 ret
26643 ENDPROC(efi_call6)
26644diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
26645index e31bcd8..f12dc46 100644
26646--- a/arch/x86/platform/mrst/mrst.c
26647+++ b/arch/x86/platform/mrst/mrst.c
26648@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
26649 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
26650 int sfi_mrtc_num;
26651
26652-static void mrst_power_off(void)
26653+static __noreturn void mrst_power_off(void)
26654 {
26655+ BUG();
26656 }
26657
26658-static void mrst_reboot(void)
26659+static __noreturn void mrst_reboot(void)
26660 {
26661 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
26662+ BUG();
26663 }
26664
26665 /* parse all the mtimer info to a static mtimer array */
26666diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26667index 218cdb1..fd55c08 100644
26668--- a/arch/x86/power/cpu.c
26669+++ b/arch/x86/power/cpu.c
26670@@ -132,7 +132,7 @@ static void do_fpu_end(void)
26671 static void fix_processor_context(void)
26672 {
26673 int cpu = smp_processor_id();
26674- struct tss_struct *t = &per_cpu(init_tss, cpu);
26675+ struct tss_struct *t = init_tss + cpu;
26676
26677 set_tss_desc(cpu, t); /*
26678 * This just modifies memory; should not be
26679@@ -142,7 +142,9 @@ static void fix_processor_context(void)
26680 */
26681
26682 #ifdef CONFIG_X86_64
26683+ pax_open_kernel();
26684 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26685+ pax_close_kernel();
26686
26687 syscall_init(); /* This sets MSR_*STAR and related */
26688 #endif
26689diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
26690index b685296..0180fa9 100644
26691--- a/arch/x86/tools/relocs.c
26692+++ b/arch/x86/tools/relocs.c
26693@@ -12,10 +12,13 @@
26694 #include <regex.h>
26695 #include <tools/le_byteshift.h>
26696
26697+#include "../../../include/generated/autoconf.h"
26698+
26699 static void die(char *fmt, ...);
26700
26701 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
26702 static Elf32_Ehdr ehdr;
26703+static Elf32_Phdr *phdr;
26704 static unsigned long reloc_count, reloc_idx;
26705 static unsigned long *relocs;
26706 static unsigned long reloc16_count, reloc16_idx;
26707@@ -323,9 +326,39 @@ static void read_ehdr(FILE *fp)
26708 }
26709 }
26710
26711+static void read_phdrs(FILE *fp)
26712+{
26713+ unsigned int i;
26714+
26715+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
26716+ if (!phdr) {
26717+ die("Unable to allocate %d program headers\n",
26718+ ehdr.e_phnum);
26719+ }
26720+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
26721+ die("Seek to %d failed: %s\n",
26722+ ehdr.e_phoff, strerror(errno));
26723+ }
26724+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
26725+ die("Cannot read ELF program headers: %s\n",
26726+ strerror(errno));
26727+ }
26728+ for(i = 0; i < ehdr.e_phnum; i++) {
26729+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
26730+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
26731+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
26732+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
26733+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
26734+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
26735+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
26736+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
26737+ }
26738+
26739+}
26740+
26741 static void read_shdrs(FILE *fp)
26742 {
26743- int i;
26744+ unsigned int i;
26745 Elf32_Shdr shdr;
26746
26747 secs = calloc(ehdr.e_shnum, sizeof(struct section));
26748@@ -360,7 +393,7 @@ static void read_shdrs(FILE *fp)
26749
26750 static void read_strtabs(FILE *fp)
26751 {
26752- int i;
26753+ unsigned int i;
26754 for (i = 0; i < ehdr.e_shnum; i++) {
26755 struct section *sec = &secs[i];
26756 if (sec->shdr.sh_type != SHT_STRTAB) {
26757@@ -385,7 +418,7 @@ static void read_strtabs(FILE *fp)
26758
26759 static void read_symtabs(FILE *fp)
26760 {
26761- int i,j;
26762+ unsigned int i,j;
26763 for (i = 0; i < ehdr.e_shnum; i++) {
26764 struct section *sec = &secs[i];
26765 if (sec->shdr.sh_type != SHT_SYMTAB) {
26766@@ -418,7 +451,9 @@ static void read_symtabs(FILE *fp)
26767
26768 static void read_relocs(FILE *fp)
26769 {
26770- int i,j;
26771+ unsigned int i,j;
26772+ uint32_t base;
26773+
26774 for (i = 0; i < ehdr.e_shnum; i++) {
26775 struct section *sec = &secs[i];
26776 if (sec->shdr.sh_type != SHT_REL) {
26777@@ -438,9 +473,22 @@ static void read_relocs(FILE *fp)
26778 die("Cannot read symbol table: %s\n",
26779 strerror(errno));
26780 }
26781+ base = 0;
26782+
26783+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
26784+ for (j = 0; j < ehdr.e_phnum; j++) {
26785+ if (phdr[j].p_type != PT_LOAD )
26786+ continue;
26787+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
26788+ continue;
26789+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
26790+ break;
26791+ }
26792+#endif
26793+
26794 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
26795 Elf32_Rel *rel = &sec->reltab[j];
26796- rel->r_offset = elf32_to_cpu(rel->r_offset);
26797+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
26798 rel->r_info = elf32_to_cpu(rel->r_info);
26799 }
26800 }
26801@@ -449,13 +497,13 @@ static void read_relocs(FILE *fp)
26802
26803 static void print_absolute_symbols(void)
26804 {
26805- int i;
26806+ unsigned int i;
26807 printf("Absolute symbols\n");
26808 printf(" Num: Value Size Type Bind Visibility Name\n");
26809 for (i = 0; i < ehdr.e_shnum; i++) {
26810 struct section *sec = &secs[i];
26811 char *sym_strtab;
26812- int j;
26813+ unsigned int j;
26814
26815 if (sec->shdr.sh_type != SHT_SYMTAB) {
26816 continue;
26817@@ -482,7 +530,7 @@ static void print_absolute_symbols(void)
26818
26819 static void print_absolute_relocs(void)
26820 {
26821- int i, printed = 0;
26822+ unsigned int i, printed = 0;
26823
26824 for (i = 0; i < ehdr.e_shnum; i++) {
26825 struct section *sec = &secs[i];
26826@@ -551,7 +599,7 @@ static void print_absolute_relocs(void)
26827 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26828 int use_real_mode)
26829 {
26830- int i;
26831+ unsigned int i;
26832 /* Walk through the relocations */
26833 for (i = 0; i < ehdr.e_shnum; i++) {
26834 char *sym_strtab;
26835@@ -581,6 +629,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26836 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
26837 r_type = ELF32_R_TYPE(rel->r_info);
26838
26839+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
26840+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
26841+ continue;
26842+
26843+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
26844+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
26845+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
26846+ continue;
26847+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
26848+ continue;
26849+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
26850+ continue;
26851+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
26852+ continue;
26853+#endif
26854+
26855 shn_abs = sym->st_shndx == SHN_ABS;
26856
26857 switch (r_type) {
26858@@ -674,7 +738,7 @@ static int write32(unsigned int v, FILE *f)
26859
26860 static void emit_relocs(int as_text, int use_real_mode)
26861 {
26862- int i;
26863+ unsigned int i;
26864 /* Count how many relocations I have and allocate space for them. */
26865 reloc_count = 0;
26866 walk_relocs(count_reloc, use_real_mode);
26867@@ -801,6 +865,7 @@ int main(int argc, char **argv)
26868 fname, strerror(errno));
26869 }
26870 read_ehdr(fp);
26871+ read_phdrs(fp);
26872 read_shdrs(fp);
26873 read_strtabs(fp);
26874 read_symtabs(fp);
26875diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26876index fd14be1..e3c79c0 100644
26877--- a/arch/x86/vdso/Makefile
26878+++ b/arch/x86/vdso/Makefile
26879@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
26880 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
26881 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
26882
26883-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26884+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26885 GCOV_PROFILE := n
26886
26887 #
26888diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26889index 66e6d93..587f435 100644
26890--- a/arch/x86/vdso/vdso32-setup.c
26891+++ b/arch/x86/vdso/vdso32-setup.c
26892@@ -25,6 +25,7 @@
26893 #include <asm/tlbflush.h>
26894 #include <asm/vdso.h>
26895 #include <asm/proto.h>
26896+#include <asm/mman.h>
26897
26898 enum {
26899 VDSO_DISABLED = 0,
26900@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26901 void enable_sep_cpu(void)
26902 {
26903 int cpu = get_cpu();
26904- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26905+ struct tss_struct *tss = init_tss + cpu;
26906
26907 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26908 put_cpu();
26909@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26910 gate_vma.vm_start = FIXADDR_USER_START;
26911 gate_vma.vm_end = FIXADDR_USER_END;
26912 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26913- gate_vma.vm_page_prot = __P101;
26914+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26915
26916 return 0;
26917 }
26918@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26919 if (compat)
26920 addr = VDSO_HIGH_BASE;
26921 else {
26922- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26923+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26924 if (IS_ERR_VALUE(addr)) {
26925 ret = addr;
26926 goto up_fail;
26927 }
26928 }
26929
26930- current->mm->context.vdso = (void *)addr;
26931+ current->mm->context.vdso = addr;
26932
26933 if (compat_uses_vma || !compat) {
26934 /*
26935@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26936 }
26937
26938 current_thread_info()->sysenter_return =
26939- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26940+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26941
26942 up_fail:
26943 if (ret)
26944- current->mm->context.vdso = NULL;
26945+ current->mm->context.vdso = 0;
26946
26947 up_write(&mm->mmap_sem);
26948
26949@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
26950
26951 const char *arch_vma_name(struct vm_area_struct *vma)
26952 {
26953- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26954+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26955 return "[vdso]";
26956+
26957+#ifdef CONFIG_PAX_SEGMEXEC
26958+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26959+ return "[vdso]";
26960+#endif
26961+
26962 return NULL;
26963 }
26964
26965@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
26966 * Check to see if the corresponding task was created in compat vdso
26967 * mode.
26968 */
26969- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
26970+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
26971 return &gate_vma;
26972 return NULL;
26973 }
26974diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
26975index 00aaf04..4a26505 100644
26976--- a/arch/x86/vdso/vma.c
26977+++ b/arch/x86/vdso/vma.c
26978@@ -16,8 +16,6 @@
26979 #include <asm/vdso.h>
26980 #include <asm/page.h>
26981
26982-unsigned int __read_mostly vdso_enabled = 1;
26983-
26984 extern char vdso_start[], vdso_end[];
26985 extern unsigned short vdso_sync_cpuid;
26986
26987@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26988 * unaligned here as a result of stack start randomization.
26989 */
26990 addr = PAGE_ALIGN(addr);
26991- addr = align_addr(addr, NULL, ALIGN_VDSO);
26992
26993 return addr;
26994 }
26995@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
26996 unsigned size)
26997 {
26998 struct mm_struct *mm = current->mm;
26999- unsigned long addr;
27000+ unsigned long addr = 0;
27001 int ret;
27002
27003- if (!vdso_enabled)
27004- return 0;
27005-
27006 down_write(&mm->mmap_sem);
27007+
27008+#ifdef CONFIG_PAX_RANDMMAP
27009+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27010+#endif
27011+
27012 addr = vdso_addr(mm->start_stack, size);
27013+ addr = align_addr(addr, NULL, ALIGN_VDSO);
27014 addr = get_unmapped_area(NULL, addr, size, 0, 0);
27015 if (IS_ERR_VALUE(addr)) {
27016 ret = addr;
27017 goto up_fail;
27018 }
27019
27020- current->mm->context.vdso = (void *)addr;
27021+ mm->context.vdso = addr;
27022
27023 ret = install_special_mapping(mm, addr, size,
27024 VM_READ|VM_EXEC|
27025 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
27026 pages);
27027- if (ret) {
27028- current->mm->context.vdso = NULL;
27029- goto up_fail;
27030- }
27031+ if (ret)
27032+ mm->context.vdso = 0;
27033
27034 up_fail:
27035 up_write(&mm->mmap_sem);
27036@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27037 vdsox32_size);
27038 }
27039 #endif
27040-
27041-static __init int vdso_setup(char *s)
27042-{
27043- vdso_enabled = simple_strtoul(s, NULL, 0);
27044- return 0;
27045-}
27046-__setup("vdso=", vdso_setup);
27047diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
27048index 6c7f1e8..de96944 100644
27049--- a/arch/x86/xen/enlighten.c
27050+++ b/arch/x86/xen/enlighten.c
27051@@ -95,8 +95,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
27052
27053 struct shared_info xen_dummy_shared_info;
27054
27055-void *xen_initial_gdt;
27056-
27057 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
27058 __read_mostly int xen_have_vector_callback;
27059 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
27060@@ -1157,30 +1155,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
27061 #endif
27062 };
27063
27064-static void xen_reboot(int reason)
27065+static __noreturn void xen_reboot(int reason)
27066 {
27067 struct sched_shutdown r = { .reason = reason };
27068
27069- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
27070- BUG();
27071+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
27072+ BUG();
27073 }
27074
27075-static void xen_restart(char *msg)
27076+static __noreturn void xen_restart(char *msg)
27077 {
27078 xen_reboot(SHUTDOWN_reboot);
27079 }
27080
27081-static void xen_emergency_restart(void)
27082+static __noreturn void xen_emergency_restart(void)
27083 {
27084 xen_reboot(SHUTDOWN_reboot);
27085 }
27086
27087-static void xen_machine_halt(void)
27088+static __noreturn void xen_machine_halt(void)
27089 {
27090 xen_reboot(SHUTDOWN_poweroff);
27091 }
27092
27093-static void xen_machine_power_off(void)
27094+static __noreturn void xen_machine_power_off(void)
27095 {
27096 if (pm_power_off)
27097 pm_power_off();
27098@@ -1283,7 +1281,17 @@ asmlinkage void __init xen_start_kernel(void)
27099 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
27100
27101 /* Work out if we support NX */
27102- x86_configure_nx();
27103+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27104+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
27105+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
27106+ unsigned l, h;
27107+
27108+ __supported_pte_mask |= _PAGE_NX;
27109+ rdmsr(MSR_EFER, l, h);
27110+ l |= EFER_NX;
27111+ wrmsr(MSR_EFER, l, h);
27112+ }
27113+#endif
27114
27115 xen_setup_features();
27116
27117@@ -1314,13 +1322,6 @@ asmlinkage void __init xen_start_kernel(void)
27118
27119 machine_ops = xen_machine_ops;
27120
27121- /*
27122- * The only reliable way to retain the initial address of the
27123- * percpu gdt_page is to remember it here, so we can go and
27124- * mark it RW later, when the initial percpu area is freed.
27125- */
27126- xen_initial_gdt = &per_cpu(gdt_page, 0);
27127-
27128 xen_smp_init();
27129
27130 #ifdef CONFIG_ACPI_NUMA
27131diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27132index 69f5857..0699dc5 100644
27133--- a/arch/x86/xen/mmu.c
27134+++ b/arch/x86/xen/mmu.c
27135@@ -1738,6 +1738,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27136 convert_pfn_mfn(init_level4_pgt);
27137 convert_pfn_mfn(level3_ident_pgt);
27138 convert_pfn_mfn(level3_kernel_pgt);
27139+ convert_pfn_mfn(level3_vmalloc_start_pgt);
27140+ convert_pfn_mfn(level3_vmalloc_end_pgt);
27141+ convert_pfn_mfn(level3_vmemmap_pgt);
27142
27143 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27144 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27145@@ -1756,7 +1759,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27146 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27147 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27148 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27149+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27150+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27151+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27152 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27153+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27154 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27155 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27156
27157@@ -1964,6 +1971,7 @@ static void __init xen_post_allocator_init(void)
27158 pv_mmu_ops.set_pud = xen_set_pud;
27159 #if PAGETABLE_LEVELS == 4
27160 pv_mmu_ops.set_pgd = xen_set_pgd;
27161+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27162 #endif
27163
27164 /* This will work as long as patching hasn't happened yet
27165@@ -2045,6 +2053,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
27166 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27167 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27168 .set_pgd = xen_set_pgd_hyper,
27169+ .set_pgd_batched = xen_set_pgd_hyper,
27170
27171 .alloc_pud = xen_alloc_pmd_init,
27172 .release_pud = xen_release_pmd_init,
27173diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27174index 0503c0c..ceb2d16 100644
27175--- a/arch/x86/xen/smp.c
27176+++ b/arch/x86/xen/smp.c
27177@@ -215,11 +215,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27178 {
27179 BUG_ON(smp_processor_id() != 0);
27180 native_smp_prepare_boot_cpu();
27181-
27182- /* We've switched to the "real" per-cpu gdt, so make sure the
27183- old memory can be recycled */
27184- make_lowmem_page_readwrite(xen_initial_gdt);
27185-
27186 xen_filter_cpu_maps();
27187 xen_setup_vcpu_info_placement();
27188 }
27189@@ -296,12 +291,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27190 gdt = get_cpu_gdt_table(cpu);
27191
27192 ctxt->flags = VGCF_IN_KERNEL;
27193- ctxt->user_regs.ds = __USER_DS;
27194- ctxt->user_regs.es = __USER_DS;
27195+ ctxt->user_regs.ds = __KERNEL_DS;
27196+ ctxt->user_regs.es = __KERNEL_DS;
27197 ctxt->user_regs.ss = __KERNEL_DS;
27198 #ifdef CONFIG_X86_32
27199 ctxt->user_regs.fs = __KERNEL_PERCPU;
27200- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27201+ savesegment(gs, ctxt->user_regs.gs);
27202 #else
27203 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27204 #endif
27205@@ -352,13 +347,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27206 int rc;
27207
27208 per_cpu(current_task, cpu) = idle;
27209+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27210 #ifdef CONFIG_X86_32
27211 irq_ctx_init(cpu);
27212 #else
27213 clear_tsk_thread_flag(idle, TIF_FORK);
27214- per_cpu(kernel_stack, cpu) =
27215- (unsigned long)task_stack_page(idle) -
27216- KERNEL_STACK_OFFSET + THREAD_SIZE;
27217+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27218 #endif
27219 xen_setup_runstate_info(cpu);
27220 xen_setup_timer(cpu);
27221diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27222index b040b0e..8cc4fe0 100644
27223--- a/arch/x86/xen/xen-asm_32.S
27224+++ b/arch/x86/xen/xen-asm_32.S
27225@@ -83,14 +83,14 @@ ENTRY(xen_iret)
27226 ESP_OFFSET=4 # bytes pushed onto stack
27227
27228 /*
27229- * Store vcpu_info pointer for easy access. Do it this way to
27230- * avoid having to reload %fs
27231+ * Store vcpu_info pointer for easy access.
27232 */
27233 #ifdef CONFIG_SMP
27234- GET_THREAD_INFO(%eax)
27235- movl TI_cpu(%eax), %eax
27236- movl __per_cpu_offset(,%eax,4), %eax
27237- mov xen_vcpu(%eax), %eax
27238+ push %fs
27239+ mov $(__KERNEL_PERCPU), %eax
27240+ mov %eax, %fs
27241+ mov PER_CPU_VAR(xen_vcpu), %eax
27242+ pop %fs
27243 #else
27244 movl xen_vcpu, %eax
27245 #endif
27246diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27247index aaa7291..3f77960 100644
27248--- a/arch/x86/xen/xen-head.S
27249+++ b/arch/x86/xen/xen-head.S
27250@@ -19,6 +19,17 @@ ENTRY(startup_xen)
27251 #ifdef CONFIG_X86_32
27252 mov %esi,xen_start_info
27253 mov $init_thread_union+THREAD_SIZE,%esp
27254+#ifdef CONFIG_SMP
27255+ movl $cpu_gdt_table,%edi
27256+ movl $__per_cpu_load,%eax
27257+ movw %ax,__KERNEL_PERCPU + 2(%edi)
27258+ rorl $16,%eax
27259+ movb %al,__KERNEL_PERCPU + 4(%edi)
27260+ movb %ah,__KERNEL_PERCPU + 7(%edi)
27261+ movl $__per_cpu_end - 1,%eax
27262+ subl $__per_cpu_start,%eax
27263+ movw %ax,__KERNEL_PERCPU + 0(%edi)
27264+#endif
27265 #else
27266 mov %rsi,xen_start_info
27267 mov $init_thread_union+THREAD_SIZE,%rsp
27268diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27269index b095739..8c17bcd 100644
27270--- a/arch/x86/xen/xen-ops.h
27271+++ b/arch/x86/xen/xen-ops.h
27272@@ -10,8 +10,6 @@
27273 extern const char xen_hypervisor_callback[];
27274 extern const char xen_failsafe_callback[];
27275
27276-extern void *xen_initial_gdt;
27277-
27278 struct trap_info;
27279 void xen_copy_trap_info(struct trap_info *traps);
27280
27281diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
27282index 525bd3d..ef888b1 100644
27283--- a/arch/xtensa/variants/dc232b/include/variant/core.h
27284+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
27285@@ -119,9 +119,9 @@
27286 ----------------------------------------------------------------------*/
27287
27288 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
27289-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
27290 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
27291 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
27292+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27293
27294 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
27295 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
27296diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
27297index 2f33760..835e50a 100644
27298--- a/arch/xtensa/variants/fsf/include/variant/core.h
27299+++ b/arch/xtensa/variants/fsf/include/variant/core.h
27300@@ -11,6 +11,7 @@
27301 #ifndef _XTENSA_CORE_H
27302 #define _XTENSA_CORE_H
27303
27304+#include <linux/const.h>
27305
27306 /****************************************************************************
27307 Parameters Useful for Any Code, USER or PRIVILEGED
27308@@ -112,9 +113,9 @@
27309 ----------------------------------------------------------------------*/
27310
27311 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27312-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27313 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27314 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27315+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27316
27317 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
27318 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
27319diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
27320index af00795..2bb8105 100644
27321--- a/arch/xtensa/variants/s6000/include/variant/core.h
27322+++ b/arch/xtensa/variants/s6000/include/variant/core.h
27323@@ -11,6 +11,7 @@
27324 #ifndef _XTENSA_CORE_CONFIGURATION_H
27325 #define _XTENSA_CORE_CONFIGURATION_H
27326
27327+#include <linux/const.h>
27328
27329 /****************************************************************************
27330 Parameters Useful for Any Code, USER or PRIVILEGED
27331@@ -118,9 +119,9 @@
27332 ----------------------------------------------------------------------*/
27333
27334 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27335-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27336 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27337 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27338+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27339
27340 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
27341 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
27342diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27343index 58916af..9cb880b 100644
27344--- a/block/blk-iopoll.c
27345+++ b/block/blk-iopoll.c
27346@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27347 }
27348 EXPORT_SYMBOL(blk_iopoll_complete);
27349
27350-static void blk_iopoll_softirq(struct softirq_action *h)
27351+static void blk_iopoll_softirq(void)
27352 {
27353 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27354 int rearm = 0, budget = blk_iopoll_budget;
27355diff --git a/block/blk-map.c b/block/blk-map.c
27356index 623e1cd..ca1e109 100644
27357--- a/block/blk-map.c
27358+++ b/block/blk-map.c
27359@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27360 if (!len || !kbuf)
27361 return -EINVAL;
27362
27363- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
27364+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
27365 if (do_copy)
27366 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27367 else
27368diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27369index 467c8de..4bddc6d 100644
27370--- a/block/blk-softirq.c
27371+++ b/block/blk-softirq.c
27372@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27373 * Softirq action handler - move entries to local list and loop over them
27374 * while passing them to the queue registered handler.
27375 */
27376-static void blk_done_softirq(struct softirq_action *h)
27377+static void blk_done_softirq(void)
27378 {
27379 struct list_head *cpu_list, local_list;
27380
27381diff --git a/block/bsg.c b/block/bsg.c
27382index ff64ae3..593560c 100644
27383--- a/block/bsg.c
27384+++ b/block/bsg.c
27385@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27386 struct sg_io_v4 *hdr, struct bsg_device *bd,
27387 fmode_t has_write_perm)
27388 {
27389+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27390+ unsigned char *cmdptr;
27391+
27392 if (hdr->request_len > BLK_MAX_CDB) {
27393 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27394 if (!rq->cmd)
27395 return -ENOMEM;
27396- }
27397+ cmdptr = rq->cmd;
27398+ } else
27399+ cmdptr = tmpcmd;
27400
27401- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
27402+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27403 hdr->request_len))
27404 return -EFAULT;
27405
27406+ if (cmdptr != rq->cmd)
27407+ memcpy(rq->cmd, cmdptr, hdr->request_len);
27408+
27409 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27410 if (blk_verify_command(rq->cmd, has_write_perm))
27411 return -EPERM;
27412diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27413index 7c668c8..db3521c 100644
27414--- a/block/compat_ioctl.c
27415+++ b/block/compat_ioctl.c
27416@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27417 err |= __get_user(f->spec1, &uf->spec1);
27418 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27419 err |= __get_user(name, &uf->name);
27420- f->name = compat_ptr(name);
27421+ f->name = (void __force_kernel *)compat_ptr(name);
27422 if (err) {
27423 err = -EFAULT;
27424 goto out;
27425diff --git a/block/partitions/efi.c b/block/partitions/efi.c
27426index 6296b40..417c00f 100644
27427--- a/block/partitions/efi.c
27428+++ b/block/partitions/efi.c
27429@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
27430 if (!gpt)
27431 return NULL;
27432
27433+ if (!le32_to_cpu(gpt->num_partition_entries))
27434+ return NULL;
27435+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
27436+ if (!pte)
27437+ return NULL;
27438+
27439 count = le32_to_cpu(gpt->num_partition_entries) *
27440 le32_to_cpu(gpt->sizeof_partition_entry);
27441- if (!count)
27442- return NULL;
27443- pte = kzalloc(count, GFP_KERNEL);
27444- if (!pte)
27445- return NULL;
27446-
27447 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
27448 (u8 *) pte,
27449 count) < count) {
27450diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27451index 260fa80..e8f3caf 100644
27452--- a/block/scsi_ioctl.c
27453+++ b/block/scsi_ioctl.c
27454@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
27455 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27456 struct sg_io_hdr *hdr, fmode_t mode)
27457 {
27458- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27459+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27460+ unsigned char *cmdptr;
27461+
27462+ if (rq->cmd != rq->__cmd)
27463+ cmdptr = rq->cmd;
27464+ else
27465+ cmdptr = tmpcmd;
27466+
27467+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27468 return -EFAULT;
27469+
27470+ if (cmdptr != rq->cmd)
27471+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27472+
27473 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27474 return -EPERM;
27475
27476@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27477 int err;
27478 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27479 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27480+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27481+ unsigned char *cmdptr;
27482
27483 if (!sic)
27484 return -EINVAL;
27485@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27486 */
27487 err = -EFAULT;
27488 rq->cmd_len = cmdlen;
27489- if (copy_from_user(rq->cmd, sic->data, cmdlen))
27490+
27491+ if (rq->cmd != rq->__cmd)
27492+ cmdptr = rq->cmd;
27493+ else
27494+ cmdptr = tmpcmd;
27495+
27496+ if (copy_from_user(cmdptr, sic->data, cmdlen))
27497 goto error;
27498
27499+ if (rq->cmd != cmdptr)
27500+ memcpy(rq->cmd, cmdptr, cmdlen);
27501+
27502 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27503 goto error;
27504
27505diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27506index 671d4d6..5f24030 100644
27507--- a/crypto/cryptd.c
27508+++ b/crypto/cryptd.c
27509@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
27510
27511 struct cryptd_blkcipher_request_ctx {
27512 crypto_completion_t complete;
27513-};
27514+} __no_const;
27515
27516 struct cryptd_hash_ctx {
27517 struct crypto_shash *child;
27518@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
27519
27520 struct cryptd_aead_request_ctx {
27521 crypto_completion_t complete;
27522-};
27523+} __no_const;
27524
27525 static void cryptd_queue_worker(struct work_struct *work);
27526
27527diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
27528index e6defd8..c26a225 100644
27529--- a/drivers/acpi/apei/cper.c
27530+++ b/drivers/acpi/apei/cper.c
27531@@ -38,12 +38,12 @@
27532 */
27533 u64 cper_next_record_id(void)
27534 {
27535- static atomic64_t seq;
27536+ static atomic64_unchecked_t seq;
27537
27538- if (!atomic64_read(&seq))
27539- atomic64_set(&seq, ((u64)get_seconds()) << 32);
27540+ if (!atomic64_read_unchecked(&seq))
27541+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
27542
27543- return atomic64_inc_return(&seq);
27544+ return atomic64_inc_return_unchecked(&seq);
27545 }
27546 EXPORT_SYMBOL_GPL(cper_next_record_id);
27547
27548diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
27549index 7586544..636a2f0 100644
27550--- a/drivers/acpi/ec_sys.c
27551+++ b/drivers/acpi/ec_sys.c
27552@@ -12,6 +12,7 @@
27553 #include <linux/acpi.h>
27554 #include <linux/debugfs.h>
27555 #include <linux/module.h>
27556+#include <linux/uaccess.h>
27557 #include "internal.h"
27558
27559 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
27560@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27561 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
27562 */
27563 unsigned int size = EC_SPACE_SIZE;
27564- u8 *data = (u8 *) buf;
27565+ u8 data;
27566 loff_t init_off = *off;
27567 int err = 0;
27568
27569@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27570 size = count;
27571
27572 while (size) {
27573- err = ec_read(*off, &data[*off - init_off]);
27574+ err = ec_read(*off, &data);
27575 if (err)
27576 return err;
27577+ if (put_user(data, &buf[*off - init_off]))
27578+ return -EFAULT;
27579 *off += 1;
27580 size--;
27581 }
27582@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27583
27584 unsigned int size = count;
27585 loff_t init_off = *off;
27586- u8 *data = (u8 *) buf;
27587 int err = 0;
27588
27589 if (*off >= EC_SPACE_SIZE)
27590@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27591 }
27592
27593 while (size) {
27594- u8 byte_write = data[*off - init_off];
27595+ u8 byte_write;
27596+ if (get_user(byte_write, &buf[*off - init_off]))
27597+ return -EFAULT;
27598 err = ec_write(*off, byte_write);
27599 if (err)
27600 return err;
27601diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27602index 251c7b62..000462d 100644
27603--- a/drivers/acpi/proc.c
27604+++ b/drivers/acpi/proc.c
27605@@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
27606 size_t count, loff_t * ppos)
27607 {
27608 struct list_head *node, *next;
27609- char strbuf[5];
27610- char str[5] = "";
27611- unsigned int len = count;
27612+ char strbuf[5] = {0};
27613
27614- if (len > 4)
27615- len = 4;
27616- if (len < 0)
27617+ if (count > 4)
27618+ count = 4;
27619+ if (copy_from_user(strbuf, buffer, count))
27620 return -EFAULT;
27621-
27622- if (copy_from_user(strbuf, buffer, len))
27623- return -EFAULT;
27624- strbuf[len] = '\0';
27625- sscanf(strbuf, "%s", str);
27626+ strbuf[count] = '\0';
27627
27628 mutex_lock(&acpi_device_lock);
27629 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27630@@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
27631 if (!dev->wakeup.flags.valid)
27632 continue;
27633
27634- if (!strncmp(dev->pnp.bus_id, str, 4)) {
27635+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27636 if (device_can_wakeup(&dev->dev)) {
27637 bool enable = !device_may_wakeup(&dev->dev);
27638 device_set_wakeup_enable(&dev->dev, enable);
27639diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
27640index 0734086..3ad3e4c 100644
27641--- a/drivers/acpi/processor_driver.c
27642+++ b/drivers/acpi/processor_driver.c
27643@@ -556,7 +556,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27644 return 0;
27645 #endif
27646
27647- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27648+ BUG_ON(pr->id >= nr_cpu_ids);
27649
27650 /*
27651 * Buggy BIOS check
27652diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27653index d31ee55..8363a8b 100644
27654--- a/drivers/ata/libata-core.c
27655+++ b/drivers/ata/libata-core.c
27656@@ -4742,7 +4742,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27657 struct ata_port *ap;
27658 unsigned int tag;
27659
27660- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27661+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27662 ap = qc->ap;
27663
27664 qc->flags = 0;
27665@@ -4758,7 +4758,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27666 struct ata_port *ap;
27667 struct ata_link *link;
27668
27669- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27670+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27671 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27672 ap = qc->ap;
27673 link = qc->dev->link;
27674@@ -5822,6 +5822,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27675 return;
27676
27677 spin_lock(&lock);
27678+ pax_open_kernel();
27679
27680 for (cur = ops->inherits; cur; cur = cur->inherits) {
27681 void **inherit = (void **)cur;
27682@@ -5835,8 +5836,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27683 if (IS_ERR(*pp))
27684 *pp = NULL;
27685
27686- ops->inherits = NULL;
27687+ *(struct ata_port_operations **)&ops->inherits = NULL;
27688
27689+ pax_close_kernel();
27690 spin_unlock(&lock);
27691 }
27692
27693diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
27694index 3239517..343b5f6 100644
27695--- a/drivers/ata/pata_arasan_cf.c
27696+++ b/drivers/ata/pata_arasan_cf.c
27697@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
27698 /* Handle platform specific quirks */
27699 if (pdata->quirk) {
27700 if (pdata->quirk & CF_BROKEN_PIO) {
27701- ap->ops->set_piomode = NULL;
27702+ pax_open_kernel();
27703+ *(void **)&ap->ops->set_piomode = NULL;
27704+ pax_close_kernel();
27705 ap->pio_mask = 0;
27706 }
27707 if (pdata->quirk & CF_BROKEN_MWDMA)
27708diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
27709index f9b983a..887b9d8 100644
27710--- a/drivers/atm/adummy.c
27711+++ b/drivers/atm/adummy.c
27712@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
27713 vcc->pop(vcc, skb);
27714 else
27715 dev_kfree_skb_any(skb);
27716- atomic_inc(&vcc->stats->tx);
27717+ atomic_inc_unchecked(&vcc->stats->tx);
27718
27719 return 0;
27720 }
27721diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
27722index f8f41e0..1f987dd 100644
27723--- a/drivers/atm/ambassador.c
27724+++ b/drivers/atm/ambassador.c
27725@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
27726 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
27727
27728 // VC layer stats
27729- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27730+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27731
27732 // free the descriptor
27733 kfree (tx_descr);
27734@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27735 dump_skb ("<<<", vc, skb);
27736
27737 // VC layer stats
27738- atomic_inc(&atm_vcc->stats->rx);
27739+ atomic_inc_unchecked(&atm_vcc->stats->rx);
27740 __net_timestamp(skb);
27741 // end of our responsibility
27742 atm_vcc->push (atm_vcc, skb);
27743@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27744 } else {
27745 PRINTK (KERN_INFO, "dropped over-size frame");
27746 // should we count this?
27747- atomic_inc(&atm_vcc->stats->rx_drop);
27748+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27749 }
27750
27751 } else {
27752@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
27753 }
27754
27755 if (check_area (skb->data, skb->len)) {
27756- atomic_inc(&atm_vcc->stats->tx_err);
27757+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
27758 return -ENOMEM; // ?
27759 }
27760
27761diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
27762index b22d71c..d6e1049 100644
27763--- a/drivers/atm/atmtcp.c
27764+++ b/drivers/atm/atmtcp.c
27765@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27766 if (vcc->pop) vcc->pop(vcc,skb);
27767 else dev_kfree_skb(skb);
27768 if (dev_data) return 0;
27769- atomic_inc(&vcc->stats->tx_err);
27770+ atomic_inc_unchecked(&vcc->stats->tx_err);
27771 return -ENOLINK;
27772 }
27773 size = skb->len+sizeof(struct atmtcp_hdr);
27774@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27775 if (!new_skb) {
27776 if (vcc->pop) vcc->pop(vcc,skb);
27777 else dev_kfree_skb(skb);
27778- atomic_inc(&vcc->stats->tx_err);
27779+ atomic_inc_unchecked(&vcc->stats->tx_err);
27780 return -ENOBUFS;
27781 }
27782 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
27783@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27784 if (vcc->pop) vcc->pop(vcc,skb);
27785 else dev_kfree_skb(skb);
27786 out_vcc->push(out_vcc,new_skb);
27787- atomic_inc(&vcc->stats->tx);
27788- atomic_inc(&out_vcc->stats->rx);
27789+ atomic_inc_unchecked(&vcc->stats->tx);
27790+ atomic_inc_unchecked(&out_vcc->stats->rx);
27791 return 0;
27792 }
27793
27794@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27795 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
27796 read_unlock(&vcc_sklist_lock);
27797 if (!out_vcc) {
27798- atomic_inc(&vcc->stats->tx_err);
27799+ atomic_inc_unchecked(&vcc->stats->tx_err);
27800 goto done;
27801 }
27802 skb_pull(skb,sizeof(struct atmtcp_hdr));
27803@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27804 __net_timestamp(new_skb);
27805 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
27806 out_vcc->push(out_vcc,new_skb);
27807- atomic_inc(&vcc->stats->tx);
27808- atomic_inc(&out_vcc->stats->rx);
27809+ atomic_inc_unchecked(&vcc->stats->tx);
27810+ atomic_inc_unchecked(&out_vcc->stats->rx);
27811 done:
27812 if (vcc->pop) vcc->pop(vcc,skb);
27813 else dev_kfree_skb(skb);
27814diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
27815index 2059ee4..faf51c7 100644
27816--- a/drivers/atm/eni.c
27817+++ b/drivers/atm/eni.c
27818@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
27819 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
27820 vcc->dev->number);
27821 length = 0;
27822- atomic_inc(&vcc->stats->rx_err);
27823+ atomic_inc_unchecked(&vcc->stats->rx_err);
27824 }
27825 else {
27826 length = ATM_CELL_SIZE-1; /* no HEC */
27827@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27828 size);
27829 }
27830 eff = length = 0;
27831- atomic_inc(&vcc->stats->rx_err);
27832+ atomic_inc_unchecked(&vcc->stats->rx_err);
27833 }
27834 else {
27835 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
27836@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27837 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
27838 vcc->dev->number,vcc->vci,length,size << 2,descr);
27839 length = eff = 0;
27840- atomic_inc(&vcc->stats->rx_err);
27841+ atomic_inc_unchecked(&vcc->stats->rx_err);
27842 }
27843 }
27844 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
27845@@ -767,7 +767,7 @@ rx_dequeued++;
27846 vcc->push(vcc,skb);
27847 pushed++;
27848 }
27849- atomic_inc(&vcc->stats->rx);
27850+ atomic_inc_unchecked(&vcc->stats->rx);
27851 }
27852 wake_up(&eni_dev->rx_wait);
27853 }
27854@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
27855 PCI_DMA_TODEVICE);
27856 if (vcc->pop) vcc->pop(vcc,skb);
27857 else dev_kfree_skb_irq(skb);
27858- atomic_inc(&vcc->stats->tx);
27859+ atomic_inc_unchecked(&vcc->stats->tx);
27860 wake_up(&eni_dev->tx_wait);
27861 dma_complete++;
27862 }
27863@@ -1567,7 +1567,7 @@ tx_complete++;
27864 /*--------------------------------- entries ---------------------------------*/
27865
27866
27867-static const char *media_name[] __devinitdata = {
27868+static const char *media_name[] __devinitconst = {
27869 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
27870 "UTP", "05?", "06?", "07?", /* 4- 7 */
27871 "TAXI","09?", "10?", "11?", /* 8-11 */
27872diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
27873index 86fed1b..6dc4721 100644
27874--- a/drivers/atm/firestream.c
27875+++ b/drivers/atm/firestream.c
27876@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
27877 }
27878 }
27879
27880- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27881+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27882
27883 fs_dprintk (FS_DEBUG_TXMEM, "i");
27884 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
27885@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27886 #endif
27887 skb_put (skb, qe->p1 & 0xffff);
27888 ATM_SKB(skb)->vcc = atm_vcc;
27889- atomic_inc(&atm_vcc->stats->rx);
27890+ atomic_inc_unchecked(&atm_vcc->stats->rx);
27891 __net_timestamp(skb);
27892 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
27893 atm_vcc->push (atm_vcc, skb);
27894@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27895 kfree (pe);
27896 }
27897 if (atm_vcc)
27898- atomic_inc(&atm_vcc->stats->rx_drop);
27899+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27900 break;
27901 case 0x1f: /* Reassembly abort: no buffers. */
27902 /* Silently increment error counter. */
27903 if (atm_vcc)
27904- atomic_inc(&atm_vcc->stats->rx_drop);
27905+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27906 break;
27907 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
27908 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
27909diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
27910index 361f5ae..7fc552d 100644
27911--- a/drivers/atm/fore200e.c
27912+++ b/drivers/atm/fore200e.c
27913@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
27914 #endif
27915 /* check error condition */
27916 if (*entry->status & STATUS_ERROR)
27917- atomic_inc(&vcc->stats->tx_err);
27918+ atomic_inc_unchecked(&vcc->stats->tx_err);
27919 else
27920- atomic_inc(&vcc->stats->tx);
27921+ atomic_inc_unchecked(&vcc->stats->tx);
27922 }
27923 }
27924
27925@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
27926 if (skb == NULL) {
27927 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
27928
27929- atomic_inc(&vcc->stats->rx_drop);
27930+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27931 return -ENOMEM;
27932 }
27933
27934@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
27935
27936 dev_kfree_skb_any(skb);
27937
27938- atomic_inc(&vcc->stats->rx_drop);
27939+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27940 return -ENOMEM;
27941 }
27942
27943 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27944
27945 vcc->push(vcc, skb);
27946- atomic_inc(&vcc->stats->rx);
27947+ atomic_inc_unchecked(&vcc->stats->rx);
27948
27949 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27950
27951@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
27952 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
27953 fore200e->atm_dev->number,
27954 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
27955- atomic_inc(&vcc->stats->rx_err);
27956+ atomic_inc_unchecked(&vcc->stats->rx_err);
27957 }
27958 }
27959
27960@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
27961 goto retry_here;
27962 }
27963
27964- atomic_inc(&vcc->stats->tx_err);
27965+ atomic_inc_unchecked(&vcc->stats->tx_err);
27966
27967 fore200e->tx_sat++;
27968 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
27969diff --git a/drivers/atm/he.c b/drivers/atm/he.c
27970index b182c2f..1c6fa8a 100644
27971--- a/drivers/atm/he.c
27972+++ b/drivers/atm/he.c
27973@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27974
27975 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
27976 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
27977- atomic_inc(&vcc->stats->rx_drop);
27978+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27979 goto return_host_buffers;
27980 }
27981
27982@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27983 RBRQ_LEN_ERR(he_dev->rbrq_head)
27984 ? "LEN_ERR" : "",
27985 vcc->vpi, vcc->vci);
27986- atomic_inc(&vcc->stats->rx_err);
27987+ atomic_inc_unchecked(&vcc->stats->rx_err);
27988 goto return_host_buffers;
27989 }
27990
27991@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27992 vcc->push(vcc, skb);
27993 spin_lock(&he_dev->global_lock);
27994
27995- atomic_inc(&vcc->stats->rx);
27996+ atomic_inc_unchecked(&vcc->stats->rx);
27997
27998 return_host_buffers:
27999 ++pdus_assembled;
28000@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
28001 tpd->vcc->pop(tpd->vcc, tpd->skb);
28002 else
28003 dev_kfree_skb_any(tpd->skb);
28004- atomic_inc(&tpd->vcc->stats->tx_err);
28005+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
28006 }
28007 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
28008 return;
28009@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28010 vcc->pop(vcc, skb);
28011 else
28012 dev_kfree_skb_any(skb);
28013- atomic_inc(&vcc->stats->tx_err);
28014+ atomic_inc_unchecked(&vcc->stats->tx_err);
28015 return -EINVAL;
28016 }
28017
28018@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28019 vcc->pop(vcc, skb);
28020 else
28021 dev_kfree_skb_any(skb);
28022- atomic_inc(&vcc->stats->tx_err);
28023+ atomic_inc_unchecked(&vcc->stats->tx_err);
28024 return -EINVAL;
28025 }
28026 #endif
28027@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28028 vcc->pop(vcc, skb);
28029 else
28030 dev_kfree_skb_any(skb);
28031- atomic_inc(&vcc->stats->tx_err);
28032+ atomic_inc_unchecked(&vcc->stats->tx_err);
28033 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28034 return -ENOMEM;
28035 }
28036@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28037 vcc->pop(vcc, skb);
28038 else
28039 dev_kfree_skb_any(skb);
28040- atomic_inc(&vcc->stats->tx_err);
28041+ atomic_inc_unchecked(&vcc->stats->tx_err);
28042 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28043 return -ENOMEM;
28044 }
28045@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28046 __enqueue_tpd(he_dev, tpd, cid);
28047 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28048
28049- atomic_inc(&vcc->stats->tx);
28050+ atomic_inc_unchecked(&vcc->stats->tx);
28051
28052 return 0;
28053 }
28054diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
28055index 75fd691..2d20b14 100644
28056--- a/drivers/atm/horizon.c
28057+++ b/drivers/atm/horizon.c
28058@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
28059 {
28060 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
28061 // VC layer stats
28062- atomic_inc(&vcc->stats->rx);
28063+ atomic_inc_unchecked(&vcc->stats->rx);
28064 __net_timestamp(skb);
28065 // end of our responsibility
28066 vcc->push (vcc, skb);
28067@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
28068 dev->tx_iovec = NULL;
28069
28070 // VC layer stats
28071- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28072+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
28073
28074 // free the skb
28075 hrz_kfree_skb (skb);
28076diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
28077index 1c05212..c28e200 100644
28078--- a/drivers/atm/idt77252.c
28079+++ b/drivers/atm/idt77252.c
28080@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
28081 else
28082 dev_kfree_skb(skb);
28083
28084- atomic_inc(&vcc->stats->tx);
28085+ atomic_inc_unchecked(&vcc->stats->tx);
28086 }
28087
28088 atomic_dec(&scq->used);
28089@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28090 if ((sb = dev_alloc_skb(64)) == NULL) {
28091 printk("%s: Can't allocate buffers for aal0.\n",
28092 card->name);
28093- atomic_add(i, &vcc->stats->rx_drop);
28094+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
28095 break;
28096 }
28097 if (!atm_charge(vcc, sb->truesize)) {
28098 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
28099 card->name);
28100- atomic_add(i - 1, &vcc->stats->rx_drop);
28101+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
28102 dev_kfree_skb(sb);
28103 break;
28104 }
28105@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28106 ATM_SKB(sb)->vcc = vcc;
28107 __net_timestamp(sb);
28108 vcc->push(vcc, sb);
28109- atomic_inc(&vcc->stats->rx);
28110+ atomic_inc_unchecked(&vcc->stats->rx);
28111
28112 cell += ATM_CELL_PAYLOAD;
28113 }
28114@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28115 "(CDC: %08x)\n",
28116 card->name, len, rpp->len, readl(SAR_REG_CDC));
28117 recycle_rx_pool_skb(card, rpp);
28118- atomic_inc(&vcc->stats->rx_err);
28119+ atomic_inc_unchecked(&vcc->stats->rx_err);
28120 return;
28121 }
28122 if (stat & SAR_RSQE_CRC) {
28123 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
28124 recycle_rx_pool_skb(card, rpp);
28125- atomic_inc(&vcc->stats->rx_err);
28126+ atomic_inc_unchecked(&vcc->stats->rx_err);
28127 return;
28128 }
28129 if (skb_queue_len(&rpp->queue) > 1) {
28130@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28131 RXPRINTK("%s: Can't alloc RX skb.\n",
28132 card->name);
28133 recycle_rx_pool_skb(card, rpp);
28134- atomic_inc(&vcc->stats->rx_err);
28135+ atomic_inc_unchecked(&vcc->stats->rx_err);
28136 return;
28137 }
28138 if (!atm_charge(vcc, skb->truesize)) {
28139@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28140 __net_timestamp(skb);
28141
28142 vcc->push(vcc, skb);
28143- atomic_inc(&vcc->stats->rx);
28144+ atomic_inc_unchecked(&vcc->stats->rx);
28145
28146 return;
28147 }
28148@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28149 __net_timestamp(skb);
28150
28151 vcc->push(vcc, skb);
28152- atomic_inc(&vcc->stats->rx);
28153+ atomic_inc_unchecked(&vcc->stats->rx);
28154
28155 if (skb->truesize > SAR_FB_SIZE_3)
28156 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
28157@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
28158 if (vcc->qos.aal != ATM_AAL0) {
28159 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
28160 card->name, vpi, vci);
28161- atomic_inc(&vcc->stats->rx_drop);
28162+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28163 goto drop;
28164 }
28165
28166 if ((sb = dev_alloc_skb(64)) == NULL) {
28167 printk("%s: Can't allocate buffers for AAL0.\n",
28168 card->name);
28169- atomic_inc(&vcc->stats->rx_err);
28170+ atomic_inc_unchecked(&vcc->stats->rx_err);
28171 goto drop;
28172 }
28173
28174@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
28175 ATM_SKB(sb)->vcc = vcc;
28176 __net_timestamp(sb);
28177 vcc->push(vcc, sb);
28178- atomic_inc(&vcc->stats->rx);
28179+ atomic_inc_unchecked(&vcc->stats->rx);
28180
28181 drop:
28182 skb_pull(queue, 64);
28183@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28184
28185 if (vc == NULL) {
28186 printk("%s: NULL connection in send().\n", card->name);
28187- atomic_inc(&vcc->stats->tx_err);
28188+ atomic_inc_unchecked(&vcc->stats->tx_err);
28189 dev_kfree_skb(skb);
28190 return -EINVAL;
28191 }
28192 if (!test_bit(VCF_TX, &vc->flags)) {
28193 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
28194- atomic_inc(&vcc->stats->tx_err);
28195+ atomic_inc_unchecked(&vcc->stats->tx_err);
28196 dev_kfree_skb(skb);
28197 return -EINVAL;
28198 }
28199@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28200 break;
28201 default:
28202 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
28203- atomic_inc(&vcc->stats->tx_err);
28204+ atomic_inc_unchecked(&vcc->stats->tx_err);
28205 dev_kfree_skb(skb);
28206 return -EINVAL;
28207 }
28208
28209 if (skb_shinfo(skb)->nr_frags != 0) {
28210 printk("%s: No scatter-gather yet.\n", card->name);
28211- atomic_inc(&vcc->stats->tx_err);
28212+ atomic_inc_unchecked(&vcc->stats->tx_err);
28213 dev_kfree_skb(skb);
28214 return -EINVAL;
28215 }
28216@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28217
28218 err = queue_skb(card, vc, skb, oam);
28219 if (err) {
28220- atomic_inc(&vcc->stats->tx_err);
28221+ atomic_inc_unchecked(&vcc->stats->tx_err);
28222 dev_kfree_skb(skb);
28223 return err;
28224 }
28225@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
28226 skb = dev_alloc_skb(64);
28227 if (!skb) {
28228 printk("%s: Out of memory in send_oam().\n", card->name);
28229- atomic_inc(&vcc->stats->tx_err);
28230+ atomic_inc_unchecked(&vcc->stats->tx_err);
28231 return -ENOMEM;
28232 }
28233 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
28234diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
28235index d438601..8b98495 100644
28236--- a/drivers/atm/iphase.c
28237+++ b/drivers/atm/iphase.c
28238@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
28239 status = (u_short) (buf_desc_ptr->desc_mode);
28240 if (status & (RX_CER | RX_PTE | RX_OFL))
28241 {
28242- atomic_inc(&vcc->stats->rx_err);
28243+ atomic_inc_unchecked(&vcc->stats->rx_err);
28244 IF_ERR(printk("IA: bad packet, dropping it");)
28245 if (status & RX_CER) {
28246 IF_ERR(printk(" cause: packet CRC error\n");)
28247@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
28248 len = dma_addr - buf_addr;
28249 if (len > iadev->rx_buf_sz) {
28250 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
28251- atomic_inc(&vcc->stats->rx_err);
28252+ atomic_inc_unchecked(&vcc->stats->rx_err);
28253 goto out_free_desc;
28254 }
28255
28256@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28257 ia_vcc = INPH_IA_VCC(vcc);
28258 if (ia_vcc == NULL)
28259 {
28260- atomic_inc(&vcc->stats->rx_err);
28261+ atomic_inc_unchecked(&vcc->stats->rx_err);
28262 atm_return(vcc, skb->truesize);
28263 dev_kfree_skb_any(skb);
28264 goto INCR_DLE;
28265@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28266 if ((length > iadev->rx_buf_sz) || (length >
28267 (skb->len - sizeof(struct cpcs_trailer))))
28268 {
28269- atomic_inc(&vcc->stats->rx_err);
28270+ atomic_inc_unchecked(&vcc->stats->rx_err);
28271 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
28272 length, skb->len);)
28273 atm_return(vcc, skb->truesize);
28274@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28275
28276 IF_RX(printk("rx_dle_intr: skb push");)
28277 vcc->push(vcc,skb);
28278- atomic_inc(&vcc->stats->rx);
28279+ atomic_inc_unchecked(&vcc->stats->rx);
28280 iadev->rx_pkt_cnt++;
28281 }
28282 INCR_DLE:
28283@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
28284 {
28285 struct k_sonet_stats *stats;
28286 stats = &PRIV(_ia_dev[board])->sonet_stats;
28287- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
28288- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
28289- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
28290- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
28291- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
28292- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
28293- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
28294- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
28295- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
28296+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
28297+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
28298+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
28299+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
28300+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
28301+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
28302+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
28303+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
28304+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
28305 }
28306 ia_cmds.status = 0;
28307 break;
28308@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28309 if ((desc == 0) || (desc > iadev->num_tx_desc))
28310 {
28311 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
28312- atomic_inc(&vcc->stats->tx);
28313+ atomic_inc_unchecked(&vcc->stats->tx);
28314 if (vcc->pop)
28315 vcc->pop(vcc, skb);
28316 else
28317@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28318 ATM_DESC(skb) = vcc->vci;
28319 skb_queue_tail(&iadev->tx_dma_q, skb);
28320
28321- atomic_inc(&vcc->stats->tx);
28322+ atomic_inc_unchecked(&vcc->stats->tx);
28323 iadev->tx_pkt_cnt++;
28324 /* Increment transaction counter */
28325 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
28326
28327 #if 0
28328 /* add flow control logic */
28329- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
28330+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
28331 if (iavcc->vc_desc_cnt > 10) {
28332 vcc->tx_quota = vcc->tx_quota * 3 / 4;
28333 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
28334diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
28335index 68c7588..7036683 100644
28336--- a/drivers/atm/lanai.c
28337+++ b/drivers/atm/lanai.c
28338@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
28339 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
28340 lanai_endtx(lanai, lvcc);
28341 lanai_free_skb(lvcc->tx.atmvcc, skb);
28342- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
28343+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
28344 }
28345
28346 /* Try to fill the buffer - don't call unless there is backlog */
28347@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
28348 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
28349 __net_timestamp(skb);
28350 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
28351- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
28352+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
28353 out:
28354 lvcc->rx.buf.ptr = end;
28355 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
28356@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28357 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
28358 "vcc %d\n", lanai->number, (unsigned int) s, vci);
28359 lanai->stats.service_rxnotaal5++;
28360- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28361+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28362 return 0;
28363 }
28364 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
28365@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28366 int bytes;
28367 read_unlock(&vcc_sklist_lock);
28368 DPRINTK("got trashed rx pdu on vci %d\n", vci);
28369- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28370+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28371 lvcc->stats.x.aal5.service_trash++;
28372 bytes = (SERVICE_GET_END(s) * 16) -
28373 (((unsigned long) lvcc->rx.buf.ptr) -
28374@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28375 }
28376 if (s & SERVICE_STREAM) {
28377 read_unlock(&vcc_sklist_lock);
28378- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28379+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28380 lvcc->stats.x.aal5.service_stream++;
28381 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
28382 "PDU on VCI %d!\n", lanai->number, vci);
28383@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28384 return 0;
28385 }
28386 DPRINTK("got rx crc error on vci %d\n", vci);
28387- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28388+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28389 lvcc->stats.x.aal5.service_rxcrc++;
28390 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
28391 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
28392diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
28393index 1c70c45..300718d 100644
28394--- a/drivers/atm/nicstar.c
28395+++ b/drivers/atm/nicstar.c
28396@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28397 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
28398 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
28399 card->index);
28400- atomic_inc(&vcc->stats->tx_err);
28401+ atomic_inc_unchecked(&vcc->stats->tx_err);
28402 dev_kfree_skb_any(skb);
28403 return -EINVAL;
28404 }
28405@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28406 if (!vc->tx) {
28407 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
28408 card->index);
28409- atomic_inc(&vcc->stats->tx_err);
28410+ atomic_inc_unchecked(&vcc->stats->tx_err);
28411 dev_kfree_skb_any(skb);
28412 return -EINVAL;
28413 }
28414@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28415 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
28416 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
28417 card->index);
28418- atomic_inc(&vcc->stats->tx_err);
28419+ atomic_inc_unchecked(&vcc->stats->tx_err);
28420 dev_kfree_skb_any(skb);
28421 return -EINVAL;
28422 }
28423
28424 if (skb_shinfo(skb)->nr_frags != 0) {
28425 printk("nicstar%d: No scatter-gather yet.\n", card->index);
28426- atomic_inc(&vcc->stats->tx_err);
28427+ atomic_inc_unchecked(&vcc->stats->tx_err);
28428 dev_kfree_skb_any(skb);
28429 return -EINVAL;
28430 }
28431@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28432 }
28433
28434 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
28435- atomic_inc(&vcc->stats->tx_err);
28436+ atomic_inc_unchecked(&vcc->stats->tx_err);
28437 dev_kfree_skb_any(skb);
28438 return -EIO;
28439 }
28440- atomic_inc(&vcc->stats->tx);
28441+ atomic_inc_unchecked(&vcc->stats->tx);
28442
28443 return 0;
28444 }
28445@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28446 printk
28447 ("nicstar%d: Can't allocate buffers for aal0.\n",
28448 card->index);
28449- atomic_add(i, &vcc->stats->rx_drop);
28450+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
28451 break;
28452 }
28453 if (!atm_charge(vcc, sb->truesize)) {
28454 RXPRINTK
28455 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
28456 card->index);
28457- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28458+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28459 dev_kfree_skb_any(sb);
28460 break;
28461 }
28462@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28463 ATM_SKB(sb)->vcc = vcc;
28464 __net_timestamp(sb);
28465 vcc->push(vcc, sb);
28466- atomic_inc(&vcc->stats->rx);
28467+ atomic_inc_unchecked(&vcc->stats->rx);
28468 cell += ATM_CELL_PAYLOAD;
28469 }
28470
28471@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28472 if (iovb == NULL) {
28473 printk("nicstar%d: Out of iovec buffers.\n",
28474 card->index);
28475- atomic_inc(&vcc->stats->rx_drop);
28476+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28477 recycle_rx_buf(card, skb);
28478 return;
28479 }
28480@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28481 small or large buffer itself. */
28482 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
28483 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
28484- atomic_inc(&vcc->stats->rx_err);
28485+ atomic_inc_unchecked(&vcc->stats->rx_err);
28486 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28487 NS_MAX_IOVECS);
28488 NS_PRV_IOVCNT(iovb) = 0;
28489@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28490 ("nicstar%d: Expected a small buffer, and this is not one.\n",
28491 card->index);
28492 which_list(card, skb);
28493- atomic_inc(&vcc->stats->rx_err);
28494+ atomic_inc_unchecked(&vcc->stats->rx_err);
28495 recycle_rx_buf(card, skb);
28496 vc->rx_iov = NULL;
28497 recycle_iov_buf(card, iovb);
28498@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28499 ("nicstar%d: Expected a large buffer, and this is not one.\n",
28500 card->index);
28501 which_list(card, skb);
28502- atomic_inc(&vcc->stats->rx_err);
28503+ atomic_inc_unchecked(&vcc->stats->rx_err);
28504 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28505 NS_PRV_IOVCNT(iovb));
28506 vc->rx_iov = NULL;
28507@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28508 printk(" - PDU size mismatch.\n");
28509 else
28510 printk(".\n");
28511- atomic_inc(&vcc->stats->rx_err);
28512+ atomic_inc_unchecked(&vcc->stats->rx_err);
28513 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28514 NS_PRV_IOVCNT(iovb));
28515 vc->rx_iov = NULL;
28516@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28517 /* skb points to a small buffer */
28518 if (!atm_charge(vcc, skb->truesize)) {
28519 push_rxbufs(card, skb);
28520- atomic_inc(&vcc->stats->rx_drop);
28521+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28522 } else {
28523 skb_put(skb, len);
28524 dequeue_sm_buf(card, skb);
28525@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28526 ATM_SKB(skb)->vcc = vcc;
28527 __net_timestamp(skb);
28528 vcc->push(vcc, skb);
28529- atomic_inc(&vcc->stats->rx);
28530+ atomic_inc_unchecked(&vcc->stats->rx);
28531 }
28532 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
28533 struct sk_buff *sb;
28534@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28535 if (len <= NS_SMBUFSIZE) {
28536 if (!atm_charge(vcc, sb->truesize)) {
28537 push_rxbufs(card, sb);
28538- atomic_inc(&vcc->stats->rx_drop);
28539+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28540 } else {
28541 skb_put(sb, len);
28542 dequeue_sm_buf(card, sb);
28543@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28544 ATM_SKB(sb)->vcc = vcc;
28545 __net_timestamp(sb);
28546 vcc->push(vcc, sb);
28547- atomic_inc(&vcc->stats->rx);
28548+ atomic_inc_unchecked(&vcc->stats->rx);
28549 }
28550
28551 push_rxbufs(card, skb);
28552@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28553
28554 if (!atm_charge(vcc, skb->truesize)) {
28555 push_rxbufs(card, skb);
28556- atomic_inc(&vcc->stats->rx_drop);
28557+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28558 } else {
28559 dequeue_lg_buf(card, skb);
28560 #ifdef NS_USE_DESTRUCTORS
28561@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28562 ATM_SKB(skb)->vcc = vcc;
28563 __net_timestamp(skb);
28564 vcc->push(vcc, skb);
28565- atomic_inc(&vcc->stats->rx);
28566+ atomic_inc_unchecked(&vcc->stats->rx);
28567 }
28568
28569 push_rxbufs(card, sb);
28570@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28571 printk
28572 ("nicstar%d: Out of huge buffers.\n",
28573 card->index);
28574- atomic_inc(&vcc->stats->rx_drop);
28575+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28576 recycle_iovec_rx_bufs(card,
28577 (struct iovec *)
28578 iovb->data,
28579@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28580 card->hbpool.count++;
28581 } else
28582 dev_kfree_skb_any(hb);
28583- atomic_inc(&vcc->stats->rx_drop);
28584+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28585 } else {
28586 /* Copy the small buffer to the huge buffer */
28587 sb = (struct sk_buff *)iov->iov_base;
28588@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28589 #endif /* NS_USE_DESTRUCTORS */
28590 __net_timestamp(hb);
28591 vcc->push(vcc, hb);
28592- atomic_inc(&vcc->stats->rx);
28593+ atomic_inc_unchecked(&vcc->stats->rx);
28594 }
28595 }
28596
28597diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
28598index 9851093..adb2b1e 100644
28599--- a/drivers/atm/solos-pci.c
28600+++ b/drivers/atm/solos-pci.c
28601@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
28602 }
28603 atm_charge(vcc, skb->truesize);
28604 vcc->push(vcc, skb);
28605- atomic_inc(&vcc->stats->rx);
28606+ atomic_inc_unchecked(&vcc->stats->rx);
28607 break;
28608
28609 case PKT_STATUS:
28610@@ -1009,7 +1009,7 @@ static uint32_t fpga_tx(struct solos_card *card)
28611 vcc = SKB_CB(oldskb)->vcc;
28612
28613 if (vcc) {
28614- atomic_inc(&vcc->stats->tx);
28615+ atomic_inc_unchecked(&vcc->stats->tx);
28616 solos_pop(vcc, oldskb);
28617 } else
28618 dev_kfree_skb_irq(oldskb);
28619diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
28620index 0215934..ce9f5b1 100644
28621--- a/drivers/atm/suni.c
28622+++ b/drivers/atm/suni.c
28623@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
28624
28625
28626 #define ADD_LIMITED(s,v) \
28627- atomic_add((v),&stats->s); \
28628- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
28629+ atomic_add_unchecked((v),&stats->s); \
28630+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
28631
28632
28633 static void suni_hz(unsigned long from_timer)
28634diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
28635index 5120a96..e2572bd 100644
28636--- a/drivers/atm/uPD98402.c
28637+++ b/drivers/atm/uPD98402.c
28638@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
28639 struct sonet_stats tmp;
28640 int error = 0;
28641
28642- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28643+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28644 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
28645 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
28646 if (zero && !error) {
28647@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
28648
28649
28650 #define ADD_LIMITED(s,v) \
28651- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
28652- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
28653- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28654+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
28655+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
28656+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28657
28658
28659 static void stat_event(struct atm_dev *dev)
28660@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
28661 if (reason & uPD98402_INT_PFM) stat_event(dev);
28662 if (reason & uPD98402_INT_PCO) {
28663 (void) GET(PCOCR); /* clear interrupt cause */
28664- atomic_add(GET(HECCT),
28665+ atomic_add_unchecked(GET(HECCT),
28666 &PRIV(dev)->sonet_stats.uncorr_hcs);
28667 }
28668 if ((reason & uPD98402_INT_RFO) &&
28669@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
28670 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
28671 uPD98402_INT_LOS),PIMR); /* enable them */
28672 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
28673- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28674- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
28675- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
28676+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28677+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
28678+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
28679 return 0;
28680 }
28681
28682diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
28683index abe4e20..83c4727 100644
28684--- a/drivers/atm/zatm.c
28685+++ b/drivers/atm/zatm.c
28686@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28687 }
28688 if (!size) {
28689 dev_kfree_skb_irq(skb);
28690- if (vcc) atomic_inc(&vcc->stats->rx_err);
28691+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
28692 continue;
28693 }
28694 if (!atm_charge(vcc,skb->truesize)) {
28695@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28696 skb->len = size;
28697 ATM_SKB(skb)->vcc = vcc;
28698 vcc->push(vcc,skb);
28699- atomic_inc(&vcc->stats->rx);
28700+ atomic_inc_unchecked(&vcc->stats->rx);
28701 }
28702 zout(pos & 0xffff,MTA(mbx));
28703 #if 0 /* probably a stupid idea */
28704@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
28705 skb_queue_head(&zatm_vcc->backlog,skb);
28706 break;
28707 }
28708- atomic_inc(&vcc->stats->tx);
28709+ atomic_inc_unchecked(&vcc->stats->tx);
28710 wake_up(&zatm_vcc->tx_wait);
28711 }
28712
28713diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
28714index 8493536..31adee0 100644
28715--- a/drivers/base/devtmpfs.c
28716+++ b/drivers/base/devtmpfs.c
28717@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
28718 if (!thread)
28719 return 0;
28720
28721- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
28722+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
28723 if (err)
28724 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
28725 else
28726diff --git a/drivers/base/node.c b/drivers/base/node.c
28727index 90aa2a1..af1a177 100644
28728--- a/drivers/base/node.c
28729+++ b/drivers/base/node.c
28730@@ -592,11 +592,9 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
28731 {
28732 int n;
28733
28734- n = nodelist_scnprintf(buf, PAGE_SIZE, node_states[state]);
28735- if (n > 0 && PAGE_SIZE > n + 1) {
28736- *(buf + n++) = '\n';
28737- *(buf + n++) = '\0';
28738- }
28739+ n = nodelist_scnprintf(buf, PAGE_SIZE-2, node_states[state]);
28740+ buf[n++] = '\n';
28741+ buf[n] = '\0';
28742 return n;
28743 }
28744
28745diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
28746index 2a3e581..3d6a73f 100644
28747--- a/drivers/base/power/wakeup.c
28748+++ b/drivers/base/power/wakeup.c
28749@@ -30,14 +30,14 @@ bool events_check_enabled;
28750 * They need to be modified together atomically, so it's better to use one
28751 * atomic variable to hold them both.
28752 */
28753-static atomic_t combined_event_count = ATOMIC_INIT(0);
28754+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
28755
28756 #define IN_PROGRESS_BITS (sizeof(int) * 4)
28757 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
28758
28759 static void split_counters(unsigned int *cnt, unsigned int *inpr)
28760 {
28761- unsigned int comb = atomic_read(&combined_event_count);
28762+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
28763
28764 *cnt = (comb >> IN_PROGRESS_BITS);
28765 *inpr = comb & MAX_IN_PROGRESS;
28766@@ -379,7 +379,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
28767 ws->last_time = ktime_get();
28768
28769 /* Increment the counter of events in progress. */
28770- atomic_inc(&combined_event_count);
28771+ atomic_inc_unchecked(&combined_event_count);
28772 }
28773
28774 /**
28775@@ -475,7 +475,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
28776 * Increment the counter of registered wakeup events and decrement the
28777 * couter of wakeup events in progress simultaneously.
28778 */
28779- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
28780+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
28781 }
28782
28783 /**
28784diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
28785index b0f553b..77b928b 100644
28786--- a/drivers/block/cciss.c
28787+++ b/drivers/block/cciss.c
28788@@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
28789 int err;
28790 u32 cp;
28791
28792+ memset(&arg64, 0, sizeof(arg64));
28793+
28794 err = 0;
28795 err |=
28796 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
28797@@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
28798 while (!list_empty(&h->reqQ)) {
28799 c = list_entry(h->reqQ.next, CommandList_struct, list);
28800 /* can't do anything if fifo is full */
28801- if ((h->access.fifo_full(h))) {
28802+ if ((h->access->fifo_full(h))) {
28803 dev_warn(&h->pdev->dev, "fifo full\n");
28804 break;
28805 }
28806@@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
28807 h->Qdepth--;
28808
28809 /* Tell the controller execute command */
28810- h->access.submit_command(h, c);
28811+ h->access->submit_command(h, c);
28812
28813 /* Put job onto the completed Q */
28814 addQ(&h->cmpQ, c);
28815@@ -3443,17 +3445,17 @@ startio:
28816
28817 static inline unsigned long get_next_completion(ctlr_info_t *h)
28818 {
28819- return h->access.command_completed(h);
28820+ return h->access->command_completed(h);
28821 }
28822
28823 static inline int interrupt_pending(ctlr_info_t *h)
28824 {
28825- return h->access.intr_pending(h);
28826+ return h->access->intr_pending(h);
28827 }
28828
28829 static inline long interrupt_not_for_us(ctlr_info_t *h)
28830 {
28831- return ((h->access.intr_pending(h) == 0) ||
28832+ return ((h->access->intr_pending(h) == 0) ||
28833 (h->interrupts_enabled == 0));
28834 }
28835
28836@@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
28837 u32 a;
28838
28839 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
28840- return h->access.command_completed(h);
28841+ return h->access->command_completed(h);
28842
28843 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
28844 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
28845@@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
28846 trans_support & CFGTBL_Trans_use_short_tags);
28847
28848 /* Change the access methods to the performant access methods */
28849- h->access = SA5_performant_access;
28850+ h->access = &SA5_performant_access;
28851 h->transMethod = CFGTBL_Trans_Performant;
28852
28853 return;
28854@@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
28855 if (prod_index < 0)
28856 return -ENODEV;
28857 h->product_name = products[prod_index].product_name;
28858- h->access = *(products[prod_index].access);
28859+ h->access = products[prod_index].access;
28860
28861 if (cciss_board_disabled(h)) {
28862 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
28863@@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
28864 }
28865
28866 /* make sure the board interrupts are off */
28867- h->access.set_intr_mask(h, CCISS_INTR_OFF);
28868+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
28869 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
28870 if (rc)
28871 goto clean2;
28872@@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
28873 * fake ones to scoop up any residual completions.
28874 */
28875 spin_lock_irqsave(&h->lock, flags);
28876- h->access.set_intr_mask(h, CCISS_INTR_OFF);
28877+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
28878 spin_unlock_irqrestore(&h->lock, flags);
28879 free_irq(h->intr[h->intr_mode], h);
28880 rc = cciss_request_irq(h, cciss_msix_discard_completions,
28881@@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
28882 dev_info(&h->pdev->dev, "Board READY.\n");
28883 dev_info(&h->pdev->dev,
28884 "Waiting for stale completions to drain.\n");
28885- h->access.set_intr_mask(h, CCISS_INTR_ON);
28886+ h->access->set_intr_mask(h, CCISS_INTR_ON);
28887 msleep(10000);
28888- h->access.set_intr_mask(h, CCISS_INTR_OFF);
28889+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
28890
28891 rc = controller_reset_failed(h->cfgtable);
28892 if (rc)
28893@@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
28894 cciss_scsi_setup(h);
28895
28896 /* Turn the interrupts on so we can service requests */
28897- h->access.set_intr_mask(h, CCISS_INTR_ON);
28898+ h->access->set_intr_mask(h, CCISS_INTR_ON);
28899
28900 /* Get the firmware version */
28901 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
28902@@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
28903 kfree(flush_buf);
28904 if (return_code != IO_OK)
28905 dev_warn(&h->pdev->dev, "Error flushing cache\n");
28906- h->access.set_intr_mask(h, CCISS_INTR_OFF);
28907+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
28908 free_irq(h->intr[h->intr_mode], h);
28909 }
28910
28911diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
28912index 7fda30e..eb5dfe0 100644
28913--- a/drivers/block/cciss.h
28914+++ b/drivers/block/cciss.h
28915@@ -101,7 +101,7 @@ struct ctlr_info
28916 /* information about each logical volume */
28917 drive_info_struct *drv[CISS_MAX_LUN];
28918
28919- struct access_method access;
28920+ struct access_method *access;
28921
28922 /* queue and queue Info */
28923 struct list_head reqQ;
28924diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
28925index 9125bbe..eede5c8 100644
28926--- a/drivers/block/cpqarray.c
28927+++ b/drivers/block/cpqarray.c
28928@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
28929 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
28930 goto Enomem4;
28931 }
28932- hba[i]->access.set_intr_mask(hba[i], 0);
28933+ hba[i]->access->set_intr_mask(hba[i], 0);
28934 if (request_irq(hba[i]->intr, do_ida_intr,
28935 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
28936 {
28937@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
28938 add_timer(&hba[i]->timer);
28939
28940 /* Enable IRQ now that spinlock and rate limit timer are set up */
28941- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28942+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28943
28944 for(j=0; j<NWD; j++) {
28945 struct gendisk *disk = ida_gendisk[i][j];
28946@@ -694,7 +694,7 @@ DBGINFO(
28947 for(i=0; i<NR_PRODUCTS; i++) {
28948 if (board_id == products[i].board_id) {
28949 c->product_name = products[i].product_name;
28950- c->access = *(products[i].access);
28951+ c->access = products[i].access;
28952 break;
28953 }
28954 }
28955@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
28956 hba[ctlr]->intr = intr;
28957 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
28958 hba[ctlr]->product_name = products[j].product_name;
28959- hba[ctlr]->access = *(products[j].access);
28960+ hba[ctlr]->access = products[j].access;
28961 hba[ctlr]->ctlr = ctlr;
28962 hba[ctlr]->board_id = board_id;
28963 hba[ctlr]->pci_dev = NULL; /* not PCI */
28964@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
28965
28966 while((c = h->reqQ) != NULL) {
28967 /* Can't do anything if we're busy */
28968- if (h->access.fifo_full(h) == 0)
28969+ if (h->access->fifo_full(h) == 0)
28970 return;
28971
28972 /* Get the first entry from the request Q */
28973@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
28974 h->Qdepth--;
28975
28976 /* Tell the controller to do our bidding */
28977- h->access.submit_command(h, c);
28978+ h->access->submit_command(h, c);
28979
28980 /* Get onto the completion Q */
28981 addQ(&h->cmpQ, c);
28982@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28983 unsigned long flags;
28984 __u32 a,a1;
28985
28986- istat = h->access.intr_pending(h);
28987+ istat = h->access->intr_pending(h);
28988 /* Is this interrupt for us? */
28989 if (istat == 0)
28990 return IRQ_NONE;
28991@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28992 */
28993 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
28994 if (istat & FIFO_NOT_EMPTY) {
28995- while((a = h->access.command_completed(h))) {
28996+ while((a = h->access->command_completed(h))) {
28997 a1 = a; a &= ~3;
28998 if ((c = h->cmpQ) == NULL)
28999 {
29000@@ -1449,11 +1449,11 @@ static int sendcmd(
29001 /*
29002 * Disable interrupt
29003 */
29004- info_p->access.set_intr_mask(info_p, 0);
29005+ info_p->access->set_intr_mask(info_p, 0);
29006 /* Make sure there is room in the command FIFO */
29007 /* Actually it should be completely empty at this time. */
29008 for (i = 200000; i > 0; i--) {
29009- temp = info_p->access.fifo_full(info_p);
29010+ temp = info_p->access->fifo_full(info_p);
29011 if (temp != 0) {
29012 break;
29013 }
29014@@ -1466,7 +1466,7 @@ DBG(
29015 /*
29016 * Send the cmd
29017 */
29018- info_p->access.submit_command(info_p, c);
29019+ info_p->access->submit_command(info_p, c);
29020 complete = pollcomplete(ctlr);
29021
29022 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
29023@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
29024 * we check the new geometry. Then turn interrupts back on when
29025 * we're done.
29026 */
29027- host->access.set_intr_mask(host, 0);
29028+ host->access->set_intr_mask(host, 0);
29029 getgeometry(ctlr);
29030- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
29031+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
29032
29033 for(i=0; i<NWD; i++) {
29034 struct gendisk *disk = ida_gendisk[ctlr][i];
29035@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
29036 /* Wait (up to 2 seconds) for a command to complete */
29037
29038 for (i = 200000; i > 0; i--) {
29039- done = hba[ctlr]->access.command_completed(hba[ctlr]);
29040+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
29041 if (done == 0) {
29042 udelay(10); /* a short fixed delay */
29043 } else
29044diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
29045index be73e9d..7fbf140 100644
29046--- a/drivers/block/cpqarray.h
29047+++ b/drivers/block/cpqarray.h
29048@@ -99,7 +99,7 @@ struct ctlr_info {
29049 drv_info_t drv[NWD];
29050 struct proc_dir_entry *proc;
29051
29052- struct access_method access;
29053+ struct access_method *access;
29054
29055 cmdlist_t *reqQ;
29056 cmdlist_t *cmpQ;
29057diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
29058index 8d68056..e67050f 100644
29059--- a/drivers/block/drbd/drbd_int.h
29060+++ b/drivers/block/drbd/drbd_int.h
29061@@ -736,7 +736,7 @@ struct drbd_request;
29062 struct drbd_epoch {
29063 struct list_head list;
29064 unsigned int barrier_nr;
29065- atomic_t epoch_size; /* increased on every request added. */
29066+ atomic_unchecked_t epoch_size; /* increased on every request added. */
29067 atomic_t active; /* increased on every req. added, and dec on every finished. */
29068 unsigned long flags;
29069 };
29070@@ -1108,7 +1108,7 @@ struct drbd_conf {
29071 void *int_dig_in;
29072 void *int_dig_vv;
29073 wait_queue_head_t seq_wait;
29074- atomic_t packet_seq;
29075+ atomic_unchecked_t packet_seq;
29076 unsigned int peer_seq;
29077 spinlock_t peer_seq_lock;
29078 unsigned int minor;
29079@@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
29080
29081 static inline void drbd_tcp_cork(struct socket *sock)
29082 {
29083- int __user val = 1;
29084+ int val = 1;
29085 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29086- (char __user *)&val, sizeof(val));
29087+ (char __force_user *)&val, sizeof(val));
29088 }
29089
29090 static inline void drbd_tcp_uncork(struct socket *sock)
29091 {
29092- int __user val = 0;
29093+ int val = 0;
29094 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29095- (char __user *)&val, sizeof(val));
29096+ (char __force_user *)&val, sizeof(val));
29097 }
29098
29099 static inline void drbd_tcp_nodelay(struct socket *sock)
29100 {
29101- int __user val = 1;
29102+ int val = 1;
29103 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
29104- (char __user *)&val, sizeof(val));
29105+ (char __force_user *)&val, sizeof(val));
29106 }
29107
29108 static inline void drbd_tcp_quickack(struct socket *sock)
29109 {
29110- int __user val = 2;
29111+ int val = 2;
29112 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
29113- (char __user *)&val, sizeof(val));
29114+ (char __force_user *)&val, sizeof(val));
29115 }
29116
29117 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
29118diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
29119index 211fc44..c5116f1 100644
29120--- a/drivers/block/drbd/drbd_main.c
29121+++ b/drivers/block/drbd/drbd_main.c
29122@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
29123 p.sector = sector;
29124 p.block_id = block_id;
29125 p.blksize = blksize;
29126- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
29127+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
29128
29129 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
29130 return false;
29131@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
29132 p.sector = cpu_to_be64(req->sector);
29133 p.block_id = (unsigned long)req;
29134 p.seq_num = cpu_to_be32(req->seq_num =
29135- atomic_add_return(1, &mdev->packet_seq));
29136+ atomic_add_return_unchecked(1, &mdev->packet_seq));
29137
29138 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
29139
29140@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
29141 atomic_set(&mdev->unacked_cnt, 0);
29142 atomic_set(&mdev->local_cnt, 0);
29143 atomic_set(&mdev->net_cnt, 0);
29144- atomic_set(&mdev->packet_seq, 0);
29145+ atomic_set_unchecked(&mdev->packet_seq, 0);
29146 atomic_set(&mdev->pp_in_use, 0);
29147 atomic_set(&mdev->pp_in_use_by_net, 0);
29148 atomic_set(&mdev->rs_sect_in, 0);
29149@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
29150 mdev->receiver.t_state);
29151
29152 /* no need to lock it, I'm the only thread alive */
29153- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
29154- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
29155+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
29156+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
29157 mdev->al_writ_cnt =
29158 mdev->bm_writ_cnt =
29159 mdev->read_cnt =
29160diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
29161index 946166e..356b39a 100644
29162--- a/drivers/block/drbd/drbd_nl.c
29163+++ b/drivers/block/drbd/drbd_nl.c
29164@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
29165 module_put(THIS_MODULE);
29166 }
29167
29168-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29169+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29170
29171 static unsigned short *
29172 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
29173@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
29174 cn_reply->id.idx = CN_IDX_DRBD;
29175 cn_reply->id.val = CN_VAL_DRBD;
29176
29177- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29178+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29179 cn_reply->ack = 0; /* not used here. */
29180 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29181 (int)((char *)tl - (char *)reply->tag_list);
29182@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
29183 cn_reply->id.idx = CN_IDX_DRBD;
29184 cn_reply->id.val = CN_VAL_DRBD;
29185
29186- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29187+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29188 cn_reply->ack = 0; /* not used here. */
29189 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29190 (int)((char *)tl - (char *)reply->tag_list);
29191@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
29192 cn_reply->id.idx = CN_IDX_DRBD;
29193 cn_reply->id.val = CN_VAL_DRBD;
29194
29195- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
29196+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
29197 cn_reply->ack = 0; // not used here.
29198 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29199 (int)((char*)tl - (char*)reply->tag_list);
29200@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
29201 cn_reply->id.idx = CN_IDX_DRBD;
29202 cn_reply->id.val = CN_VAL_DRBD;
29203
29204- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29205+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29206 cn_reply->ack = 0; /* not used here. */
29207 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29208 (int)((char *)tl - (char *)reply->tag_list);
29209diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
29210index 43beaca..4a5b1dd 100644
29211--- a/drivers/block/drbd/drbd_receiver.c
29212+++ b/drivers/block/drbd/drbd_receiver.c
29213@@ -894,7 +894,7 @@ retry:
29214 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
29215 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
29216
29217- atomic_set(&mdev->packet_seq, 0);
29218+ atomic_set_unchecked(&mdev->packet_seq, 0);
29219 mdev->peer_seq = 0;
29220
29221 drbd_thread_start(&mdev->asender);
29222@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29223 do {
29224 next_epoch = NULL;
29225
29226- epoch_size = atomic_read(&epoch->epoch_size);
29227+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
29228
29229 switch (ev & ~EV_CLEANUP) {
29230 case EV_PUT:
29231@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29232 rv = FE_DESTROYED;
29233 } else {
29234 epoch->flags = 0;
29235- atomic_set(&epoch->epoch_size, 0);
29236+ atomic_set_unchecked(&epoch->epoch_size, 0);
29237 /* atomic_set(&epoch->active, 0); is already zero */
29238 if (rv == FE_STILL_LIVE)
29239 rv = FE_RECYCLED;
29240@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29241 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
29242 drbd_flush(mdev);
29243
29244- if (atomic_read(&mdev->current_epoch->epoch_size)) {
29245+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29246 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
29247 if (epoch)
29248 break;
29249 }
29250
29251 epoch = mdev->current_epoch;
29252- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
29253+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
29254
29255 D_ASSERT(atomic_read(&epoch->active) == 0);
29256 D_ASSERT(epoch->flags == 0);
29257@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29258 }
29259
29260 epoch->flags = 0;
29261- atomic_set(&epoch->epoch_size, 0);
29262+ atomic_set_unchecked(&epoch->epoch_size, 0);
29263 atomic_set(&epoch->active, 0);
29264
29265 spin_lock(&mdev->epoch_lock);
29266- if (atomic_read(&mdev->current_epoch->epoch_size)) {
29267+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29268 list_add(&epoch->list, &mdev->current_epoch->list);
29269 mdev->current_epoch = epoch;
29270 mdev->epochs++;
29271@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29272 spin_unlock(&mdev->peer_seq_lock);
29273
29274 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
29275- atomic_inc(&mdev->current_epoch->epoch_size);
29276+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
29277 return drbd_drain_block(mdev, data_size);
29278 }
29279
29280@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29281
29282 spin_lock(&mdev->epoch_lock);
29283 e->epoch = mdev->current_epoch;
29284- atomic_inc(&e->epoch->epoch_size);
29285+ atomic_inc_unchecked(&e->epoch->epoch_size);
29286 atomic_inc(&e->epoch->active);
29287 spin_unlock(&mdev->epoch_lock);
29288
29289@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
29290 D_ASSERT(list_empty(&mdev->done_ee));
29291
29292 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
29293- atomic_set(&mdev->current_epoch->epoch_size, 0);
29294+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
29295 D_ASSERT(list_empty(&mdev->current_epoch->list));
29296 }
29297
29298diff --git a/drivers/block/loop.c b/drivers/block/loop.c
29299index bbca966..65e37dd 100644
29300--- a/drivers/block/loop.c
29301+++ b/drivers/block/loop.c
29302@@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
29303 mm_segment_t old_fs = get_fs();
29304
29305 set_fs(get_ds());
29306- bw = file->f_op->write(file, buf, len, &pos);
29307+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
29308 set_fs(old_fs);
29309 if (likely(bw == len))
29310 return 0;
29311diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
29312index ee94686..3e09ad3 100644
29313--- a/drivers/char/Kconfig
29314+++ b/drivers/char/Kconfig
29315@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
29316
29317 config DEVKMEM
29318 bool "/dev/kmem virtual device support"
29319- default y
29320+ default n
29321+ depends on !GRKERNSEC_KMEM
29322 help
29323 Say Y here if you want to support the /dev/kmem device. The
29324 /dev/kmem device is rarely used, but can be used for certain
29325@@ -581,6 +582,7 @@ config DEVPORT
29326 bool
29327 depends on !M68K
29328 depends on ISA || PCI
29329+ depends on !GRKERNSEC_KMEM
29330 default y
29331
29332 source "drivers/s390/char/Kconfig"
29333diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
29334index 2e04433..22afc64 100644
29335--- a/drivers/char/agp/frontend.c
29336+++ b/drivers/char/agp/frontend.c
29337@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
29338 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
29339 return -EFAULT;
29340
29341- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
29342+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
29343 return -EFAULT;
29344
29345 client = agp_find_client_by_pid(reserve.pid);
29346diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
29347index 21cb980..f15107c 100644
29348--- a/drivers/char/genrtc.c
29349+++ b/drivers/char/genrtc.c
29350@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
29351 switch (cmd) {
29352
29353 case RTC_PLL_GET:
29354+ memset(&pll, 0, sizeof(pll));
29355 if (get_rtc_pll(&pll))
29356 return -EINVAL;
29357 else
29358diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
29359index dfd7876..c0b0885 100644
29360--- a/drivers/char/hpet.c
29361+++ b/drivers/char/hpet.c
29362@@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
29363 }
29364
29365 static int
29366-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
29367+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
29368 struct hpet_info *info)
29369 {
29370 struct hpet_timer __iomem *timer;
29371diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
29372index 2c29942..604c5ba 100644
29373--- a/drivers/char/ipmi/ipmi_msghandler.c
29374+++ b/drivers/char/ipmi/ipmi_msghandler.c
29375@@ -420,7 +420,7 @@ struct ipmi_smi {
29376 struct proc_dir_entry *proc_dir;
29377 char proc_dir_name[10];
29378
29379- atomic_t stats[IPMI_NUM_STATS];
29380+ atomic_unchecked_t stats[IPMI_NUM_STATS];
29381
29382 /*
29383 * run_to_completion duplicate of smb_info, smi_info
29384@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
29385
29386
29387 #define ipmi_inc_stat(intf, stat) \
29388- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
29389+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
29390 #define ipmi_get_stat(intf, stat) \
29391- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
29392+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
29393
29394 static int is_lan_addr(struct ipmi_addr *addr)
29395 {
29396@@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
29397 INIT_LIST_HEAD(&intf->cmd_rcvrs);
29398 init_waitqueue_head(&intf->waitq);
29399 for (i = 0; i < IPMI_NUM_STATS; i++)
29400- atomic_set(&intf->stats[i], 0);
29401+ atomic_set_unchecked(&intf->stats[i], 0);
29402
29403 intf->proc_dir = NULL;
29404
29405diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
29406index 1e638ff..a869ef5 100644
29407--- a/drivers/char/ipmi/ipmi_si_intf.c
29408+++ b/drivers/char/ipmi/ipmi_si_intf.c
29409@@ -275,7 +275,7 @@ struct smi_info {
29410 unsigned char slave_addr;
29411
29412 /* Counters and things for the proc filesystem. */
29413- atomic_t stats[SI_NUM_STATS];
29414+ atomic_unchecked_t stats[SI_NUM_STATS];
29415
29416 struct task_struct *thread;
29417
29418@@ -284,9 +284,9 @@ struct smi_info {
29419 };
29420
29421 #define smi_inc_stat(smi, stat) \
29422- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
29423+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
29424 #define smi_get_stat(smi, stat) \
29425- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
29426+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
29427
29428 #define SI_MAX_PARMS 4
29429
29430@@ -3209,7 +3209,7 @@ static int try_smi_init(struct smi_info *new_smi)
29431 atomic_set(&new_smi->req_events, 0);
29432 new_smi->run_to_completion = 0;
29433 for (i = 0; i < SI_NUM_STATS; i++)
29434- atomic_set(&new_smi->stats[i], 0);
29435+ atomic_set_unchecked(&new_smi->stats[i], 0);
29436
29437 new_smi->interrupt_disabled = 1;
29438 atomic_set(&new_smi->stop_operation, 0);
29439diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
29440index 47ff7e4..0c7d340 100644
29441--- a/drivers/char/mbcs.c
29442+++ b/drivers/char/mbcs.c
29443@@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
29444 return 0;
29445 }
29446
29447-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
29448+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
29449 {
29450 .part_num = MBCS_PART_NUM,
29451 .mfg_num = MBCS_MFG_NUM,
29452diff --git a/drivers/char/mem.c b/drivers/char/mem.c
29453index d6e9d08..4493e89 100644
29454--- a/drivers/char/mem.c
29455+++ b/drivers/char/mem.c
29456@@ -18,6 +18,7 @@
29457 #include <linux/raw.h>
29458 #include <linux/tty.h>
29459 #include <linux/capability.h>
29460+#include <linux/security.h>
29461 #include <linux/ptrace.h>
29462 #include <linux/device.h>
29463 #include <linux/highmem.h>
29464@@ -35,6 +36,10 @@
29465 # include <linux/efi.h>
29466 #endif
29467
29468+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29469+extern const struct file_operations grsec_fops;
29470+#endif
29471+
29472 static inline unsigned long size_inside_page(unsigned long start,
29473 unsigned long size)
29474 {
29475@@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29476
29477 while (cursor < to) {
29478 if (!devmem_is_allowed(pfn)) {
29479+#ifdef CONFIG_GRKERNSEC_KMEM
29480+ gr_handle_mem_readwrite(from, to);
29481+#else
29482 printk(KERN_INFO
29483 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
29484 current->comm, from, to);
29485+#endif
29486 return 0;
29487 }
29488 cursor += PAGE_SIZE;
29489@@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29490 }
29491 return 1;
29492 }
29493+#elif defined(CONFIG_GRKERNSEC_KMEM)
29494+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29495+{
29496+ return 0;
29497+}
29498 #else
29499 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29500 {
29501@@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29502
29503 while (count > 0) {
29504 unsigned long remaining;
29505+ char *temp;
29506
29507 sz = size_inside_page(p, count);
29508
29509@@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29510 if (!ptr)
29511 return -EFAULT;
29512
29513- remaining = copy_to_user(buf, ptr, sz);
29514+#ifdef CONFIG_PAX_USERCOPY
29515+ temp = kmalloc(sz, GFP_KERNEL);
29516+ if (!temp) {
29517+ unxlate_dev_mem_ptr(p, ptr);
29518+ return -ENOMEM;
29519+ }
29520+ memcpy(temp, ptr, sz);
29521+#else
29522+ temp = ptr;
29523+#endif
29524+
29525+ remaining = copy_to_user(buf, temp, sz);
29526+
29527+#ifdef CONFIG_PAX_USERCOPY
29528+ kfree(temp);
29529+#endif
29530+
29531 unxlate_dev_mem_ptr(p, ptr);
29532 if (remaining)
29533 return -EFAULT;
29534@@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29535 size_t count, loff_t *ppos)
29536 {
29537 unsigned long p = *ppos;
29538- ssize_t low_count, read, sz;
29539+ ssize_t low_count, read, sz, err = 0;
29540 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
29541- int err = 0;
29542
29543 read = 0;
29544 if (p < (unsigned long) high_memory) {
29545@@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29546 }
29547 #endif
29548 while (low_count > 0) {
29549+ char *temp;
29550+
29551 sz = size_inside_page(p, low_count);
29552
29553 /*
29554@@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29555 */
29556 kbuf = xlate_dev_kmem_ptr((char *)p);
29557
29558- if (copy_to_user(buf, kbuf, sz))
29559+#ifdef CONFIG_PAX_USERCOPY
29560+ temp = kmalloc(sz, GFP_KERNEL);
29561+ if (!temp)
29562+ return -ENOMEM;
29563+ memcpy(temp, kbuf, sz);
29564+#else
29565+ temp = kbuf;
29566+#endif
29567+
29568+ err = copy_to_user(buf, temp, sz);
29569+
29570+#ifdef CONFIG_PAX_USERCOPY
29571+ kfree(temp);
29572+#endif
29573+
29574+ if (err)
29575 return -EFAULT;
29576 buf += sz;
29577 p += sz;
29578@@ -867,6 +914,9 @@ static const struct memdev {
29579 #ifdef CONFIG_CRASH_DUMP
29580 [12] = { "oldmem", 0, &oldmem_fops, NULL },
29581 #endif
29582+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29583+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
29584+#endif
29585 };
29586
29587 static int memory_open(struct inode *inode, struct file *filp)
29588diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
29589index 9df78e2..01ba9ae 100644
29590--- a/drivers/char/nvram.c
29591+++ b/drivers/char/nvram.c
29592@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
29593
29594 spin_unlock_irq(&rtc_lock);
29595
29596- if (copy_to_user(buf, contents, tmp - contents))
29597+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
29598 return -EFAULT;
29599
29600 *ppos = i;
29601diff --git a/drivers/char/random.c b/drivers/char/random.c
29602index 4ec04a7..4a092ed 100644
29603--- a/drivers/char/random.c
29604+++ b/drivers/char/random.c
29605@@ -261,8 +261,13 @@
29606 /*
29607 * Configuration information
29608 */
29609+#ifdef CONFIG_GRKERNSEC_RANDNET
29610+#define INPUT_POOL_WORDS 512
29611+#define OUTPUT_POOL_WORDS 128
29612+#else
29613 #define INPUT_POOL_WORDS 128
29614 #define OUTPUT_POOL_WORDS 32
29615+#endif
29616 #define SEC_XFER_SIZE 512
29617 #define EXTRACT_SIZE 10
29618
29619@@ -300,10 +305,17 @@ static struct poolinfo {
29620 int poolwords;
29621 int tap1, tap2, tap3, tap4, tap5;
29622 } poolinfo_table[] = {
29623+#ifdef CONFIG_GRKERNSEC_RANDNET
29624+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
29625+ { 512, 411, 308, 208, 104, 1 },
29626+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
29627+ { 128, 103, 76, 51, 25, 1 },
29628+#else
29629 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
29630 { 128, 103, 76, 51, 25, 1 },
29631 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
29632 { 32, 26, 20, 14, 7, 1 },
29633+#endif
29634 #if 0
29635 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
29636 { 2048, 1638, 1231, 819, 411, 1 },
29637@@ -913,7 +925,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
29638
29639 extract_buf(r, tmp);
29640 i = min_t(int, nbytes, EXTRACT_SIZE);
29641- if (copy_to_user(buf, tmp, i)) {
29642+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
29643 ret = -EFAULT;
29644 break;
29645 }
29646@@ -1238,7 +1250,7 @@ EXPORT_SYMBOL(generate_random_uuid);
29647 #include <linux/sysctl.h>
29648
29649 static int min_read_thresh = 8, min_write_thresh;
29650-static int max_read_thresh = INPUT_POOL_WORDS * 32;
29651+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
29652 static int max_write_thresh = INPUT_POOL_WORDS * 32;
29653 static char sysctl_bootid[16];
29654
29655diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
29656index 45713f0..8286d21 100644
29657--- a/drivers/char/sonypi.c
29658+++ b/drivers/char/sonypi.c
29659@@ -54,6 +54,7 @@
29660
29661 #include <asm/uaccess.h>
29662 #include <asm/io.h>
29663+#include <asm/local.h>
29664
29665 #include <linux/sonypi.h>
29666
29667@@ -490,7 +491,7 @@ static struct sonypi_device {
29668 spinlock_t fifo_lock;
29669 wait_queue_head_t fifo_proc_list;
29670 struct fasync_struct *fifo_async;
29671- int open_count;
29672+ local_t open_count;
29673 int model;
29674 struct input_dev *input_jog_dev;
29675 struct input_dev *input_key_dev;
29676@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
29677 static int sonypi_misc_release(struct inode *inode, struct file *file)
29678 {
29679 mutex_lock(&sonypi_device.lock);
29680- sonypi_device.open_count--;
29681+ local_dec(&sonypi_device.open_count);
29682 mutex_unlock(&sonypi_device.lock);
29683 return 0;
29684 }
29685@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
29686 {
29687 mutex_lock(&sonypi_device.lock);
29688 /* Flush input queue on first open */
29689- if (!sonypi_device.open_count)
29690+ if (!local_read(&sonypi_device.open_count))
29691 kfifo_reset(&sonypi_device.fifo);
29692- sonypi_device.open_count++;
29693+ local_inc(&sonypi_device.open_count);
29694 mutex_unlock(&sonypi_device.lock);
29695
29696 return 0;
29697diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
29698index ad7c732..5aa8054 100644
29699--- a/drivers/char/tpm/tpm.c
29700+++ b/drivers/char/tpm/tpm.c
29701@@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
29702 chip->vendor.req_complete_val)
29703 goto out_recv;
29704
29705- if ((status == chip->vendor.req_canceled)) {
29706+ if (status == chip->vendor.req_canceled) {
29707 dev_err(chip->dev, "Operation Canceled\n");
29708 rc = -ECANCELED;
29709 goto out;
29710diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
29711index 0636520..169c1d0 100644
29712--- a/drivers/char/tpm/tpm_bios.c
29713+++ b/drivers/char/tpm/tpm_bios.c
29714@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
29715 event = addr;
29716
29717 if ((event->event_type == 0 && event->event_size == 0) ||
29718- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
29719+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
29720 return NULL;
29721
29722 return addr;
29723@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
29724 return NULL;
29725
29726 if ((event->event_type == 0 && event->event_size == 0) ||
29727- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
29728+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
29729 return NULL;
29730
29731 (*pos)++;
29732@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
29733 int i;
29734
29735 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
29736- seq_putc(m, data[i]);
29737+ if (!seq_putc(m, data[i]))
29738+ return -EFAULT;
29739
29740 return 0;
29741 }
29742@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
29743 log->bios_event_log_end = log->bios_event_log + len;
29744
29745 virt = acpi_os_map_memory(start, len);
29746+ if (!virt) {
29747+ kfree(log->bios_event_log);
29748+ log->bios_event_log = NULL;
29749+ return -EFAULT;
29750+ }
29751
29752- memcpy(log->bios_event_log, virt, len);
29753+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
29754
29755 acpi_os_unmap_memory(virt, len);
29756 return 0;
29757diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
29758index cdf2f54..e55c197 100644
29759--- a/drivers/char/virtio_console.c
29760+++ b/drivers/char/virtio_console.c
29761@@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
29762 if (to_user) {
29763 ssize_t ret;
29764
29765- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
29766+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
29767 if (ret)
29768 return -EFAULT;
29769 } else {
29770@@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
29771 if (!port_has_data(port) && !port->host_connected)
29772 return 0;
29773
29774- return fill_readbuf(port, ubuf, count, true);
29775+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
29776 }
29777
29778 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
29779diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
29780index 97f5064..202b6e6 100644
29781--- a/drivers/edac/edac_pci_sysfs.c
29782+++ b/drivers/edac/edac_pci_sysfs.c
29783@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
29784 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
29785 static int edac_pci_poll_msec = 1000; /* one second workq period */
29786
29787-static atomic_t pci_parity_count = ATOMIC_INIT(0);
29788-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
29789+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
29790+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
29791
29792 static struct kobject *edac_pci_top_main_kobj;
29793 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
29794@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29795 edac_printk(KERN_CRIT, EDAC_PCI,
29796 "Signaled System Error on %s\n",
29797 pci_name(dev));
29798- atomic_inc(&pci_nonparity_count);
29799+ atomic_inc_unchecked(&pci_nonparity_count);
29800 }
29801
29802 if (status & (PCI_STATUS_PARITY)) {
29803@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29804 "Master Data Parity Error on %s\n",
29805 pci_name(dev));
29806
29807- atomic_inc(&pci_parity_count);
29808+ atomic_inc_unchecked(&pci_parity_count);
29809 }
29810
29811 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29812@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29813 "Detected Parity Error on %s\n",
29814 pci_name(dev));
29815
29816- atomic_inc(&pci_parity_count);
29817+ atomic_inc_unchecked(&pci_parity_count);
29818 }
29819 }
29820
29821@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29822 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
29823 "Signaled System Error on %s\n",
29824 pci_name(dev));
29825- atomic_inc(&pci_nonparity_count);
29826+ atomic_inc_unchecked(&pci_nonparity_count);
29827 }
29828
29829 if (status & (PCI_STATUS_PARITY)) {
29830@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29831 "Master Data Parity Error on "
29832 "%s\n", pci_name(dev));
29833
29834- atomic_inc(&pci_parity_count);
29835+ atomic_inc_unchecked(&pci_parity_count);
29836 }
29837
29838 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29839@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29840 "Detected Parity Error on %s\n",
29841 pci_name(dev));
29842
29843- atomic_inc(&pci_parity_count);
29844+ atomic_inc_unchecked(&pci_parity_count);
29845 }
29846 }
29847 }
29848@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
29849 if (!check_pci_errors)
29850 return;
29851
29852- before_count = atomic_read(&pci_parity_count);
29853+ before_count = atomic_read_unchecked(&pci_parity_count);
29854
29855 /* scan all PCI devices looking for a Parity Error on devices and
29856 * bridges.
29857@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
29858 /* Only if operator has selected panic on PCI Error */
29859 if (edac_pci_get_panic_on_pe()) {
29860 /* If the count is different 'after' from 'before' */
29861- if (before_count != atomic_read(&pci_parity_count))
29862+ if (before_count != atomic_read_unchecked(&pci_parity_count))
29863 panic("EDAC: PCI Parity Error");
29864 }
29865 }
29866diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
29867index c6074c5..88a9e2e 100644
29868--- a/drivers/edac/mce_amd.h
29869+++ b/drivers/edac/mce_amd.h
29870@@ -82,7 +82,7 @@ extern const char * const ii_msgs[];
29871 struct amd_decoder_ops {
29872 bool (*dc_mce)(u16, u8);
29873 bool (*ic_mce)(u16, u8);
29874-};
29875+} __no_const;
29876
29877 void amd_report_gart_errors(bool);
29878 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
29879diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
29880index cc595eb..4ec702a 100644
29881--- a/drivers/firewire/core-card.c
29882+++ b/drivers/firewire/core-card.c
29883@@ -679,7 +679,7 @@ void fw_card_release(struct kref *kref)
29884
29885 void fw_core_remove_card(struct fw_card *card)
29886 {
29887- struct fw_card_driver dummy_driver = dummy_driver_template;
29888+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
29889
29890 card->driver->update_phy_reg(card, 4,
29891 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
29892diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
29893index 2e6b245..c3857d9 100644
29894--- a/drivers/firewire/core-cdev.c
29895+++ b/drivers/firewire/core-cdev.c
29896@@ -1341,8 +1341,7 @@ static int init_iso_resource(struct client *client,
29897 int ret;
29898
29899 if ((request->channels == 0 && request->bandwidth == 0) ||
29900- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
29901- request->bandwidth < 0)
29902+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
29903 return -EINVAL;
29904
29905 r = kmalloc(sizeof(*r), GFP_KERNEL);
29906diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
29907index dea2dcc..a4fb978 100644
29908--- a/drivers/firewire/core-transaction.c
29909+++ b/drivers/firewire/core-transaction.c
29910@@ -37,6 +37,7 @@
29911 #include <linux/timer.h>
29912 #include <linux/types.h>
29913 #include <linux/workqueue.h>
29914+#include <linux/sched.h>
29915
29916 #include <asm/byteorder.h>
29917
29918diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
29919index 9047f55..e47c7ff 100644
29920--- a/drivers/firewire/core.h
29921+++ b/drivers/firewire/core.h
29922@@ -110,6 +110,7 @@ struct fw_card_driver {
29923
29924 int (*stop_iso)(struct fw_iso_context *ctx);
29925 };
29926+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
29927
29928 void fw_card_initialize(struct fw_card *card,
29929 const struct fw_card_driver *driver, struct device *device);
29930diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
29931index 153980b..4b4d046 100644
29932--- a/drivers/firmware/dmi_scan.c
29933+++ b/drivers/firmware/dmi_scan.c
29934@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
29935 }
29936 }
29937 else {
29938- /*
29939- * no iounmap() for that ioremap(); it would be a no-op, but
29940- * it's so early in setup that sucker gets confused into doing
29941- * what it shouldn't if we actually call it.
29942- */
29943 p = dmi_ioremap(0xF0000, 0x10000);
29944 if (p == NULL)
29945 goto error;
29946@@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
29947 if (buf == NULL)
29948 return -1;
29949
29950- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
29951+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
29952
29953 iounmap(buf);
29954 return 0;
29955diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
29956index 82d5c20..44a7177 100644
29957--- a/drivers/gpio/gpio-vr41xx.c
29958+++ b/drivers/gpio/gpio-vr41xx.c
29959@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
29960 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
29961 maskl, pendl, maskh, pendh);
29962
29963- atomic_inc(&irq_err_count);
29964+ atomic_inc_unchecked(&irq_err_count);
29965
29966 return -EINVAL;
29967 }
29968diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
29969index 8111889..367b253 100644
29970--- a/drivers/gpu/drm/drm_crtc_helper.c
29971+++ b/drivers/gpu/drm/drm_crtc_helper.c
29972@@ -286,7 +286,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
29973 struct drm_crtc *tmp;
29974 int crtc_mask = 1;
29975
29976- WARN(!crtc, "checking null crtc?\n");
29977+ BUG_ON(!crtc);
29978
29979 dev = crtc->dev;
29980
29981diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
29982index 6116e3b..c29dd16 100644
29983--- a/drivers/gpu/drm/drm_drv.c
29984+++ b/drivers/gpu/drm/drm_drv.c
29985@@ -316,7 +316,7 @@ module_exit(drm_core_exit);
29986 /**
29987 * Copy and IOCTL return string to user space
29988 */
29989-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
29990+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
29991 {
29992 int len;
29993
29994@@ -399,7 +399,7 @@ long drm_ioctl(struct file *filp,
29995 return -ENODEV;
29996
29997 atomic_inc(&dev->ioctl_count);
29998- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
29999+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
30000 ++file_priv->ioctl_count;
30001
30002 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
30003diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
30004index 123de28..43a0897 100644
30005--- a/drivers/gpu/drm/drm_fops.c
30006+++ b/drivers/gpu/drm/drm_fops.c
30007@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
30008 }
30009
30010 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
30011- atomic_set(&dev->counts[i], 0);
30012+ atomic_set_unchecked(&dev->counts[i], 0);
30013
30014 dev->sigdata.lock = NULL;
30015
30016@@ -138,8 +138,8 @@ int drm_open(struct inode *inode, struct file *filp)
30017
30018 retcode = drm_open_helper(inode, filp, dev);
30019 if (!retcode) {
30020- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
30021- if (!dev->open_count++)
30022+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
30023+ if (local_inc_return(&dev->open_count) == 1)
30024 retcode = drm_setup(dev);
30025 }
30026 if (!retcode) {
30027@@ -482,7 +482,7 @@ int drm_release(struct inode *inode, struct file *filp)
30028
30029 mutex_lock(&drm_global_mutex);
30030
30031- DRM_DEBUG("open_count = %d\n", dev->open_count);
30032+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
30033
30034 if (dev->driver->preclose)
30035 dev->driver->preclose(dev, file_priv);
30036@@ -491,10 +491,10 @@ int drm_release(struct inode *inode, struct file *filp)
30037 * Begin inline drm_release
30038 */
30039
30040- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
30041+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
30042 task_pid_nr(current),
30043 (long)old_encode_dev(file_priv->minor->device),
30044- dev->open_count);
30045+ local_read(&dev->open_count));
30046
30047 /* Release any auth tokens that might point to this file_priv,
30048 (do that under the drm_global_mutex) */
30049@@ -584,8 +584,8 @@ int drm_release(struct inode *inode, struct file *filp)
30050 * End inline drm_release
30051 */
30052
30053- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
30054- if (!--dev->open_count) {
30055+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
30056+ if (local_dec_and_test(&dev->open_count)) {
30057 if (atomic_read(&dev->ioctl_count)) {
30058 DRM_ERROR("Device busy: %d\n",
30059 atomic_read(&dev->ioctl_count));
30060diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
30061index c87dc96..326055d 100644
30062--- a/drivers/gpu/drm/drm_global.c
30063+++ b/drivers/gpu/drm/drm_global.c
30064@@ -36,7 +36,7 @@
30065 struct drm_global_item {
30066 struct mutex mutex;
30067 void *object;
30068- int refcount;
30069+ atomic_t refcount;
30070 };
30071
30072 static struct drm_global_item glob[DRM_GLOBAL_NUM];
30073@@ -49,7 +49,7 @@ void drm_global_init(void)
30074 struct drm_global_item *item = &glob[i];
30075 mutex_init(&item->mutex);
30076 item->object = NULL;
30077- item->refcount = 0;
30078+ atomic_set(&item->refcount, 0);
30079 }
30080 }
30081
30082@@ -59,7 +59,7 @@ void drm_global_release(void)
30083 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
30084 struct drm_global_item *item = &glob[i];
30085 BUG_ON(item->object != NULL);
30086- BUG_ON(item->refcount != 0);
30087+ BUG_ON(atomic_read(&item->refcount) != 0);
30088 }
30089 }
30090
30091@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30092 void *object;
30093
30094 mutex_lock(&item->mutex);
30095- if (item->refcount == 0) {
30096+ if (atomic_read(&item->refcount) == 0) {
30097 item->object = kzalloc(ref->size, GFP_KERNEL);
30098 if (unlikely(item->object == NULL)) {
30099 ret = -ENOMEM;
30100@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30101 goto out_err;
30102
30103 }
30104- ++item->refcount;
30105+ atomic_inc(&item->refcount);
30106 ref->object = item->object;
30107 object = item->object;
30108 mutex_unlock(&item->mutex);
30109@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
30110 struct drm_global_item *item = &glob[ref->global_type];
30111
30112 mutex_lock(&item->mutex);
30113- BUG_ON(item->refcount == 0);
30114+ BUG_ON(atomic_read(&item->refcount) == 0);
30115 BUG_ON(ref->object != item->object);
30116- if (--item->refcount == 0) {
30117+ if (atomic_dec_and_test(&item->refcount)) {
30118 ref->release(ref);
30119 item->object = NULL;
30120 }
30121diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
30122index ab1162d..42587b2 100644
30123--- a/drivers/gpu/drm/drm_info.c
30124+++ b/drivers/gpu/drm/drm_info.c
30125@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
30126 struct drm_local_map *map;
30127 struct drm_map_list *r_list;
30128
30129- /* Hardcoded from _DRM_FRAME_BUFFER,
30130- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
30131- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
30132- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
30133+ static const char * const types[] = {
30134+ [_DRM_FRAME_BUFFER] = "FB",
30135+ [_DRM_REGISTERS] = "REG",
30136+ [_DRM_SHM] = "SHM",
30137+ [_DRM_AGP] = "AGP",
30138+ [_DRM_SCATTER_GATHER] = "SG",
30139+ [_DRM_CONSISTENT] = "PCI",
30140+ [_DRM_GEM] = "GEM" };
30141 const char *type;
30142 int i;
30143
30144@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
30145 map = r_list->map;
30146 if (!map)
30147 continue;
30148- if (map->type < 0 || map->type > 5)
30149+ if (map->type >= ARRAY_SIZE(types))
30150 type = "??";
30151 else
30152 type = types[map->type];
30153@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
30154 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
30155 vma->vm_flags & VM_LOCKED ? 'l' : '-',
30156 vma->vm_flags & VM_IO ? 'i' : '-',
30157+#ifdef CONFIG_GRKERNSEC_HIDESYM
30158+ 0);
30159+#else
30160 vma->vm_pgoff);
30161+#endif
30162
30163 #if defined(__i386__)
30164 pgprot = pgprot_val(vma->vm_page_prot);
30165diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
30166index 637fcc3..e890b33 100644
30167--- a/drivers/gpu/drm/drm_ioc32.c
30168+++ b/drivers/gpu/drm/drm_ioc32.c
30169@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
30170 request = compat_alloc_user_space(nbytes);
30171 if (!access_ok(VERIFY_WRITE, request, nbytes))
30172 return -EFAULT;
30173- list = (struct drm_buf_desc *) (request + 1);
30174+ list = (struct drm_buf_desc __user *) (request + 1);
30175
30176 if (__put_user(count, &request->count)
30177 || __put_user(list, &request->list))
30178@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
30179 request = compat_alloc_user_space(nbytes);
30180 if (!access_ok(VERIFY_WRITE, request, nbytes))
30181 return -EFAULT;
30182- list = (struct drm_buf_pub *) (request + 1);
30183+ list = (struct drm_buf_pub __user *) (request + 1);
30184
30185 if (__put_user(count, &request->count)
30186 || __put_user(list, &request->list))
30187diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
30188index cf85155..f2665cb 100644
30189--- a/drivers/gpu/drm/drm_ioctl.c
30190+++ b/drivers/gpu/drm/drm_ioctl.c
30191@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
30192 stats->data[i].value =
30193 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
30194 else
30195- stats->data[i].value = atomic_read(&dev->counts[i]);
30196+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
30197 stats->data[i].type = dev->types[i];
30198 }
30199
30200diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
30201index c79c713..2048588 100644
30202--- a/drivers/gpu/drm/drm_lock.c
30203+++ b/drivers/gpu/drm/drm_lock.c
30204@@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30205 if (drm_lock_take(&master->lock, lock->context)) {
30206 master->lock.file_priv = file_priv;
30207 master->lock.lock_time = jiffies;
30208- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
30209+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
30210 break; /* Got lock */
30211 }
30212
30213@@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30214 return -EINVAL;
30215 }
30216
30217- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
30218+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
30219
30220 if (drm_lock_free(&master->lock, lock->context)) {
30221 /* FIXME: Should really bail out here. */
30222diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
30223index aa454f8..6d38580 100644
30224--- a/drivers/gpu/drm/drm_stub.c
30225+++ b/drivers/gpu/drm/drm_stub.c
30226@@ -512,7 +512,7 @@ void drm_unplug_dev(struct drm_device *dev)
30227
30228 drm_device_set_unplugged(dev);
30229
30230- if (dev->open_count == 0) {
30231+ if (local_read(&dev->open_count) == 0) {
30232 drm_put_dev(dev);
30233 }
30234 mutex_unlock(&drm_global_mutex);
30235diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
30236index f920fb5..001c52d 100644
30237--- a/drivers/gpu/drm/i810/i810_dma.c
30238+++ b/drivers/gpu/drm/i810/i810_dma.c
30239@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
30240 dma->buflist[vertex->idx],
30241 vertex->discard, vertex->used);
30242
30243- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30244- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30245+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30246+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30247 sarea_priv->last_enqueue = dev_priv->counter - 1;
30248 sarea_priv->last_dispatch = (int)hw_status[5];
30249
30250@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
30251 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
30252 mc->last_render);
30253
30254- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30255- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30256+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30257+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30258 sarea_priv->last_enqueue = dev_priv->counter - 1;
30259 sarea_priv->last_dispatch = (int)hw_status[5];
30260
30261diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
30262index c9339f4..f5e1b9d 100644
30263--- a/drivers/gpu/drm/i810/i810_drv.h
30264+++ b/drivers/gpu/drm/i810/i810_drv.h
30265@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
30266 int page_flipping;
30267
30268 wait_queue_head_t irq_queue;
30269- atomic_t irq_received;
30270- atomic_t irq_emitted;
30271+ atomic_unchecked_t irq_received;
30272+ atomic_unchecked_t irq_emitted;
30273
30274 int front_offset;
30275 } drm_i810_private_t;
30276diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
30277index e6162a1..b2ff486 100644
30278--- a/drivers/gpu/drm/i915/i915_debugfs.c
30279+++ b/drivers/gpu/drm/i915/i915_debugfs.c
30280@@ -500,7 +500,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
30281 I915_READ(GTIMR));
30282 }
30283 seq_printf(m, "Interrupts received: %d\n",
30284- atomic_read(&dev_priv->irq_received));
30285+ atomic_read_unchecked(&dev_priv->irq_received));
30286 for (i = 0; i < I915_NUM_RINGS; i++) {
30287 if (IS_GEN6(dev) || IS_GEN7(dev)) {
30288 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
30289@@ -1313,7 +1313,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
30290 return ret;
30291
30292 if (opregion->header)
30293- seq_write(m, opregion->header, OPREGION_SIZE);
30294+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
30295
30296 mutex_unlock(&dev->struct_mutex);
30297
30298diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
30299index ba60f3c..e2dff7f 100644
30300--- a/drivers/gpu/drm/i915/i915_dma.c
30301+++ b/drivers/gpu/drm/i915/i915_dma.c
30302@@ -1178,7 +1178,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
30303 bool can_switch;
30304
30305 spin_lock(&dev->count_lock);
30306- can_switch = (dev->open_count == 0);
30307+ can_switch = (local_read(&dev->open_count) == 0);
30308 spin_unlock(&dev->count_lock);
30309 return can_switch;
30310 }
30311diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
30312index 5fabc6c..0b08aa1 100644
30313--- a/drivers/gpu/drm/i915/i915_drv.h
30314+++ b/drivers/gpu/drm/i915/i915_drv.h
30315@@ -240,7 +240,7 @@ struct drm_i915_display_funcs {
30316 /* render clock increase/decrease */
30317 /* display clock increase/decrease */
30318 /* pll clock increase/decrease */
30319-};
30320+} __no_const;
30321
30322 struct intel_device_info {
30323 u8 gen;
30324@@ -350,7 +350,7 @@ typedef struct drm_i915_private {
30325 int current_page;
30326 int page_flipping;
30327
30328- atomic_t irq_received;
30329+ atomic_unchecked_t irq_received;
30330
30331 /* protects the irq masks */
30332 spinlock_t irq_lock;
30333@@ -937,7 +937,7 @@ struct drm_i915_gem_object {
30334 * will be page flipped away on the next vblank. When it
30335 * reaches 0, dev_priv->pending_flip_queue will be woken up.
30336 */
30337- atomic_t pending_flip;
30338+ atomic_unchecked_t pending_flip;
30339 };
30340
30341 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
30342@@ -1359,7 +1359,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
30343 extern void intel_teardown_gmbus(struct drm_device *dev);
30344 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
30345 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
30346-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30347+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30348 {
30349 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
30350 }
30351diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30352index de43194..a14c4cc 100644
30353--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30354+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30355@@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
30356 i915_gem_clflush_object(obj);
30357
30358 if (obj->base.pending_write_domain)
30359- cd->flips |= atomic_read(&obj->pending_flip);
30360+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
30361
30362 /* The actual obj->write_domain will be updated with
30363 * pending_write_domain after we emit the accumulated flush for all
30364@@ -933,9 +933,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
30365
30366 static int
30367 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
30368- int count)
30369+ unsigned int count)
30370 {
30371- int i;
30372+ unsigned int i;
30373
30374 for (i = 0; i < count; i++) {
30375 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
30376diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
30377index f57e5cf..c82f79d 100644
30378--- a/drivers/gpu/drm/i915/i915_irq.c
30379+++ b/drivers/gpu/drm/i915/i915_irq.c
30380@@ -472,7 +472,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
30381 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
30382 struct drm_i915_master_private *master_priv;
30383
30384- atomic_inc(&dev_priv->irq_received);
30385+ atomic_inc_unchecked(&dev_priv->irq_received);
30386
30387 /* disable master interrupt before clearing iir */
30388 de_ier = I915_READ(DEIER);
30389@@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
30390 struct drm_i915_master_private *master_priv;
30391 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
30392
30393- atomic_inc(&dev_priv->irq_received);
30394+ atomic_inc_unchecked(&dev_priv->irq_received);
30395
30396 if (IS_GEN6(dev))
30397 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
30398@@ -1292,7 +1292,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
30399 int ret = IRQ_NONE, pipe;
30400 bool blc_event = false;
30401
30402- atomic_inc(&dev_priv->irq_received);
30403+ atomic_inc_unchecked(&dev_priv->irq_received);
30404
30405 iir = I915_READ(IIR);
30406
30407@@ -1803,7 +1803,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
30408 {
30409 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30410
30411- atomic_set(&dev_priv->irq_received, 0);
30412+ atomic_set_unchecked(&dev_priv->irq_received, 0);
30413
30414 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30415 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30416@@ -1980,7 +1980,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
30417 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30418 int pipe;
30419
30420- atomic_set(&dev_priv->irq_received, 0);
30421+ atomic_set_unchecked(&dev_priv->irq_received, 0);
30422
30423 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30424 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30425diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
30426index d4d162f..e80037c 100644
30427--- a/drivers/gpu/drm/i915/intel_display.c
30428+++ b/drivers/gpu/drm/i915/intel_display.c
30429@@ -2254,7 +2254,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
30430
30431 wait_event(dev_priv->pending_flip_queue,
30432 atomic_read(&dev_priv->mm.wedged) ||
30433- atomic_read(&obj->pending_flip) == 0);
30434+ atomic_read_unchecked(&obj->pending_flip) == 0);
30435
30436 /* Big Hammer, we also need to ensure that any pending
30437 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
30438@@ -2919,7 +2919,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
30439 obj = to_intel_framebuffer(crtc->fb)->obj;
30440 dev_priv = crtc->dev->dev_private;
30441 wait_event(dev_priv->pending_flip_queue,
30442- atomic_read(&obj->pending_flip) == 0);
30443+ atomic_read_unchecked(&obj->pending_flip) == 0);
30444 }
30445
30446 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
30447@@ -7286,7 +7286,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
30448
30449 atomic_clear_mask(1 << intel_crtc->plane,
30450 &obj->pending_flip.counter);
30451- if (atomic_read(&obj->pending_flip) == 0)
30452+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
30453 wake_up(&dev_priv->pending_flip_queue);
30454
30455 schedule_work(&work->work);
30456@@ -7582,7 +7582,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30457 /* Block clients from rendering to the new back buffer until
30458 * the flip occurs and the object is no longer visible.
30459 */
30460- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30461+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30462
30463 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
30464 if (ret)
30465@@ -7596,7 +7596,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30466 return 0;
30467
30468 cleanup_pending:
30469- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30470+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30471 drm_gem_object_unreference(&work->old_fb_obj->base);
30472 drm_gem_object_unreference(&obj->base);
30473 mutex_unlock(&dev->struct_mutex);
30474diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
30475index 54558a0..2d97005 100644
30476--- a/drivers/gpu/drm/mga/mga_drv.h
30477+++ b/drivers/gpu/drm/mga/mga_drv.h
30478@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
30479 u32 clear_cmd;
30480 u32 maccess;
30481
30482- atomic_t vbl_received; /**< Number of vblanks received. */
30483+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
30484 wait_queue_head_t fence_queue;
30485- atomic_t last_fence_retired;
30486+ atomic_unchecked_t last_fence_retired;
30487 u32 next_fence_to_post;
30488
30489 unsigned int fb_cpp;
30490diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
30491index 2581202..f230a8d9 100644
30492--- a/drivers/gpu/drm/mga/mga_irq.c
30493+++ b/drivers/gpu/drm/mga/mga_irq.c
30494@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
30495 if (crtc != 0)
30496 return 0;
30497
30498- return atomic_read(&dev_priv->vbl_received);
30499+ return atomic_read_unchecked(&dev_priv->vbl_received);
30500 }
30501
30502
30503@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30504 /* VBLANK interrupt */
30505 if (status & MGA_VLINEPEN) {
30506 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
30507- atomic_inc(&dev_priv->vbl_received);
30508+ atomic_inc_unchecked(&dev_priv->vbl_received);
30509 drm_handle_vblank(dev, 0);
30510 handled = 1;
30511 }
30512@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30513 if ((prim_start & ~0x03) != (prim_end & ~0x03))
30514 MGA_WRITE(MGA_PRIMEND, prim_end);
30515
30516- atomic_inc(&dev_priv->last_fence_retired);
30517+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
30518 DRM_WAKEUP(&dev_priv->fence_queue);
30519 handled = 1;
30520 }
30521@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
30522 * using fences.
30523 */
30524 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
30525- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
30526+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
30527 - *sequence) <= (1 << 23)));
30528
30529 *sequence = cur_fence;
30530diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
30531index 0be4a81..7464804 100644
30532--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
30533+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
30534@@ -5329,7 +5329,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
30535 struct bit_table {
30536 const char id;
30537 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
30538-};
30539+} __no_const;
30540
30541 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
30542
30543diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
30544index 3aef353..0ad1322 100644
30545--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
30546+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
30547@@ -240,7 +240,7 @@ struct nouveau_channel {
30548 struct list_head pending;
30549 uint32_t sequence;
30550 uint32_t sequence_ack;
30551- atomic_t last_sequence_irq;
30552+ atomic_unchecked_t last_sequence_irq;
30553 struct nouveau_vma vma;
30554 } fence;
30555
30556@@ -321,7 +321,7 @@ struct nouveau_exec_engine {
30557 u32 handle, u16 class);
30558 void (*set_tile_region)(struct drm_device *dev, int i);
30559 void (*tlb_flush)(struct drm_device *, int engine);
30560-};
30561+} __no_const;
30562
30563 struct nouveau_instmem_engine {
30564 void *priv;
30565@@ -343,13 +343,13 @@ struct nouveau_instmem_engine {
30566 struct nouveau_mc_engine {
30567 int (*init)(struct drm_device *dev);
30568 void (*takedown)(struct drm_device *dev);
30569-};
30570+} __no_const;
30571
30572 struct nouveau_timer_engine {
30573 int (*init)(struct drm_device *dev);
30574 void (*takedown)(struct drm_device *dev);
30575 uint64_t (*read)(struct drm_device *dev);
30576-};
30577+} __no_const;
30578
30579 struct nouveau_fb_engine {
30580 int num_tiles;
30581@@ -590,7 +590,7 @@ struct nouveau_vram_engine {
30582 void (*put)(struct drm_device *, struct nouveau_mem **);
30583
30584 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
30585-};
30586+} __no_const;
30587
30588 struct nouveau_engine {
30589 struct nouveau_instmem_engine instmem;
30590@@ -739,7 +739,7 @@ struct drm_nouveau_private {
30591 struct drm_global_reference mem_global_ref;
30592 struct ttm_bo_global_ref bo_global_ref;
30593 struct ttm_bo_device bdev;
30594- atomic_t validate_sequence;
30595+ atomic_unchecked_t validate_sequence;
30596 } ttm;
30597
30598 struct {
30599diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
30600index c1dc20f..4df673c 100644
30601--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
30602+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
30603@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
30604 if (USE_REFCNT(dev))
30605 sequence = nvchan_rd32(chan, 0x48);
30606 else
30607- sequence = atomic_read(&chan->fence.last_sequence_irq);
30608+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
30609
30610 if (chan->fence.sequence_ack == sequence)
30611 goto out;
30612@@ -538,7 +538,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
30613 return ret;
30614 }
30615
30616- atomic_set(&chan->fence.last_sequence_irq, 0);
30617+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
30618 return 0;
30619 }
30620
30621diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
30622index ed52a6f..484acdc 100644
30623--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
30624+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
30625@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
30626 int trycnt = 0;
30627 int ret, i;
30628
30629- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
30630+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
30631 retry:
30632 if (++trycnt > 100000) {
30633 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
30634diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
30635index c2a8511..4b996f9 100644
30636--- a/drivers/gpu/drm/nouveau/nouveau_state.c
30637+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
30638@@ -588,7 +588,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
30639 bool can_switch;
30640
30641 spin_lock(&dev->count_lock);
30642- can_switch = (dev->open_count == 0);
30643+ can_switch = (local_read(&dev->open_count) == 0);
30644 spin_unlock(&dev->count_lock);
30645 return can_switch;
30646 }
30647diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
30648index dbdea8e..cd6eeeb 100644
30649--- a/drivers/gpu/drm/nouveau/nv04_graph.c
30650+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
30651@@ -554,7 +554,7 @@ static int
30652 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
30653 u32 class, u32 mthd, u32 data)
30654 {
30655- atomic_set(&chan->fence.last_sequence_irq, data);
30656+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
30657 return 0;
30658 }
30659
30660diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
30661index 2746402..c8dc4a4 100644
30662--- a/drivers/gpu/drm/nouveau/nv50_sor.c
30663+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
30664@@ -304,7 +304,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
30665 }
30666
30667 if (nv_encoder->dcb->type == OUTPUT_DP) {
30668- struct dp_train_func func = {
30669+ static struct dp_train_func func = {
30670 .link_set = nv50_sor_dp_link_set,
30671 .train_set = nv50_sor_dp_train_set,
30672 .train_adj = nv50_sor_dp_train_adj
30673diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
30674index 0247250..d2f6aaf 100644
30675--- a/drivers/gpu/drm/nouveau/nvd0_display.c
30676+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
30677@@ -1366,7 +1366,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
30678 nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
30679
30680 if (nv_encoder->dcb->type == OUTPUT_DP) {
30681- struct dp_train_func func = {
30682+ static struct dp_train_func func = {
30683 .link_set = nvd0_sor_dp_link_set,
30684 .train_set = nvd0_sor_dp_train_set,
30685 .train_adj = nvd0_sor_dp_train_adj
30686diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
30687index bcac90b..53bfc76 100644
30688--- a/drivers/gpu/drm/r128/r128_cce.c
30689+++ b/drivers/gpu/drm/r128/r128_cce.c
30690@@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
30691
30692 /* GH: Simple idle check.
30693 */
30694- atomic_set(&dev_priv->idle_count, 0);
30695+ atomic_set_unchecked(&dev_priv->idle_count, 0);
30696
30697 /* We don't support anything other than bus-mastering ring mode,
30698 * but the ring can be in either AGP or PCI space for the ring
30699diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
30700index 930c71b..499aded 100644
30701--- a/drivers/gpu/drm/r128/r128_drv.h
30702+++ b/drivers/gpu/drm/r128/r128_drv.h
30703@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
30704 int is_pci;
30705 unsigned long cce_buffers_offset;
30706
30707- atomic_t idle_count;
30708+ atomic_unchecked_t idle_count;
30709
30710 int page_flipping;
30711 int current_page;
30712 u32 crtc_offset;
30713 u32 crtc_offset_cntl;
30714
30715- atomic_t vbl_received;
30716+ atomic_unchecked_t vbl_received;
30717
30718 u32 color_fmt;
30719 unsigned int front_offset;
30720diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
30721index 429d5a0..7e899ed 100644
30722--- a/drivers/gpu/drm/r128/r128_irq.c
30723+++ b/drivers/gpu/drm/r128/r128_irq.c
30724@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
30725 if (crtc != 0)
30726 return 0;
30727
30728- return atomic_read(&dev_priv->vbl_received);
30729+ return atomic_read_unchecked(&dev_priv->vbl_received);
30730 }
30731
30732 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30733@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30734 /* VBLANK interrupt */
30735 if (status & R128_CRTC_VBLANK_INT) {
30736 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
30737- atomic_inc(&dev_priv->vbl_received);
30738+ atomic_inc_unchecked(&dev_priv->vbl_received);
30739 drm_handle_vblank(dev, 0);
30740 return IRQ_HANDLED;
30741 }
30742diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
30743index a9e33ce..09edd4b 100644
30744--- a/drivers/gpu/drm/r128/r128_state.c
30745+++ b/drivers/gpu/drm/r128/r128_state.c
30746@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
30747
30748 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
30749 {
30750- if (atomic_read(&dev_priv->idle_count) == 0)
30751+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
30752 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
30753 else
30754- atomic_set(&dev_priv->idle_count, 0);
30755+ atomic_set_unchecked(&dev_priv->idle_count, 0);
30756 }
30757
30758 #endif
30759diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
30760index 5a82b6b..9e69c73 100644
30761--- a/drivers/gpu/drm/radeon/mkregtable.c
30762+++ b/drivers/gpu/drm/radeon/mkregtable.c
30763@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
30764 regex_t mask_rex;
30765 regmatch_t match[4];
30766 char buf[1024];
30767- size_t end;
30768+ long end;
30769 int len;
30770 int done = 0;
30771 int r;
30772 unsigned o;
30773 struct offset *offset;
30774 char last_reg_s[10];
30775- int last_reg;
30776+ unsigned long last_reg;
30777
30778 if (regcomp
30779 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
30780diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
30781index 138b952..d74f9cb 100644
30782--- a/drivers/gpu/drm/radeon/radeon.h
30783+++ b/drivers/gpu/drm/radeon/radeon.h
30784@@ -253,7 +253,7 @@ struct radeon_fence_driver {
30785 uint32_t scratch_reg;
30786 uint64_t gpu_addr;
30787 volatile uint32_t *cpu_addr;
30788- atomic_t seq;
30789+ atomic_unchecked_t seq;
30790 uint32_t last_seq;
30791 unsigned long last_jiffies;
30792 unsigned long last_timeout;
30793@@ -753,7 +753,7 @@ struct r600_blit_cp_primitives {
30794 int x2, int y2);
30795 void (*draw_auto)(struct radeon_device *rdev);
30796 void (*set_default_state)(struct radeon_device *rdev);
30797-};
30798+} __no_const;
30799
30800 struct r600_blit {
30801 struct mutex mutex;
30802@@ -1246,7 +1246,7 @@ struct radeon_asic {
30803 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
30804 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
30805 } pflip;
30806-};
30807+} __no_const;
30808
30809 /*
30810 * Asic structures
30811diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
30812index 5992502..c19c633 100644
30813--- a/drivers/gpu/drm/radeon/radeon_device.c
30814+++ b/drivers/gpu/drm/radeon/radeon_device.c
30815@@ -691,7 +691,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
30816 bool can_switch;
30817
30818 spin_lock(&dev->count_lock);
30819- can_switch = (dev->open_count == 0);
30820+ can_switch = (local_read(&dev->open_count) == 0);
30821 spin_unlock(&dev->count_lock);
30822 return can_switch;
30823 }
30824diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
30825index a1b59ca..86f2d44 100644
30826--- a/drivers/gpu/drm/radeon/radeon_drv.h
30827+++ b/drivers/gpu/drm/radeon/radeon_drv.h
30828@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
30829
30830 /* SW interrupt */
30831 wait_queue_head_t swi_queue;
30832- atomic_t swi_emitted;
30833+ atomic_unchecked_t swi_emitted;
30834 int vblank_crtc;
30835 uint32_t irq_enable_reg;
30836 uint32_t r500_disp_irq_reg;
30837diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
30838index 4bd36a3..e66fe9c 100644
30839--- a/drivers/gpu/drm/radeon/radeon_fence.c
30840+++ b/drivers/gpu/drm/radeon/radeon_fence.c
30841@@ -70,7 +70,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
30842 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
30843 return 0;
30844 }
30845- fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
30846+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv[fence->ring].seq);
30847 if (!rdev->ring[fence->ring].ready)
30848 /* FIXME: cp is not running assume everythings is done right
30849 * away
30850@@ -405,7 +405,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
30851 }
30852 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
30853 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
30854- radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
30855+ radeon_fence_write(rdev, atomic_read_unchecked(&rdev->fence_drv[ring].seq), ring);
30856 rdev->fence_drv[ring].initialized = true;
30857 DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
30858 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
30859@@ -418,7 +418,7 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
30860 rdev->fence_drv[ring].scratch_reg = -1;
30861 rdev->fence_drv[ring].cpu_addr = NULL;
30862 rdev->fence_drv[ring].gpu_addr = 0;
30863- atomic_set(&rdev->fence_drv[ring].seq, 0);
30864+ atomic_set_unchecked(&rdev->fence_drv[ring].seq, 0);
30865 INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
30866 INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
30867 INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
30868diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
30869index 48b7cea..342236f 100644
30870--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
30871+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
30872@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
30873 request = compat_alloc_user_space(sizeof(*request));
30874 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
30875 || __put_user(req32.param, &request->param)
30876- || __put_user((void __user *)(unsigned long)req32.value,
30877+ || __put_user((unsigned long)req32.value,
30878 &request->value))
30879 return -EFAULT;
30880
30881diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
30882index 00da384..32f972d 100644
30883--- a/drivers/gpu/drm/radeon/radeon_irq.c
30884+++ b/drivers/gpu/drm/radeon/radeon_irq.c
30885@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
30886 unsigned int ret;
30887 RING_LOCALS;
30888
30889- atomic_inc(&dev_priv->swi_emitted);
30890- ret = atomic_read(&dev_priv->swi_emitted);
30891+ atomic_inc_unchecked(&dev_priv->swi_emitted);
30892+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
30893
30894 BEGIN_RING(4);
30895 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
30896@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
30897 drm_radeon_private_t *dev_priv =
30898 (drm_radeon_private_t *) dev->dev_private;
30899
30900- atomic_set(&dev_priv->swi_emitted, 0);
30901+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
30902 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
30903
30904 dev->max_vblank_count = 0x001fffff;
30905diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
30906index e8422ae..d22d4a8 100644
30907--- a/drivers/gpu/drm/radeon/radeon_state.c
30908+++ b/drivers/gpu/drm/radeon/radeon_state.c
30909@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
30910 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
30911 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
30912
30913- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30914+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30915 sarea_priv->nbox * sizeof(depth_boxes[0])))
30916 return -EFAULT;
30917
30918@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
30919 {
30920 drm_radeon_private_t *dev_priv = dev->dev_private;
30921 drm_radeon_getparam_t *param = data;
30922- int value;
30923+ int value = 0;
30924
30925 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
30926
30927diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
30928index f493c64..524ab6b 100644
30929--- a/drivers/gpu/drm/radeon/radeon_ttm.c
30930+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
30931@@ -843,8 +843,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
30932 }
30933 if (unlikely(ttm_vm_ops == NULL)) {
30934 ttm_vm_ops = vma->vm_ops;
30935- radeon_ttm_vm_ops = *ttm_vm_ops;
30936- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30937+ pax_open_kernel();
30938+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
30939+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30940+ pax_close_kernel();
30941 }
30942 vma->vm_ops = &radeon_ttm_vm_ops;
30943 return 0;
30944diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
30945index f2c3b9d..d5a376b 100644
30946--- a/drivers/gpu/drm/radeon/rs690.c
30947+++ b/drivers/gpu/drm/radeon/rs690.c
30948@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
30949 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
30950 rdev->pm.sideport_bandwidth.full)
30951 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
30952- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
30953+ read_delay_latency.full = dfixed_const(800 * 1000);
30954 read_delay_latency.full = dfixed_div(read_delay_latency,
30955 rdev->pm.igp_sideport_mclk);
30956+ a.full = dfixed_const(370);
30957+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
30958 } else {
30959 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
30960 rdev->pm.k8_bandwidth.full)
30961diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
30962index ebc6fac..a8313ed 100644
30963--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
30964+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
30965@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
30966 static int ttm_pool_mm_shrink(struct shrinker *shrink,
30967 struct shrink_control *sc)
30968 {
30969- static atomic_t start_pool = ATOMIC_INIT(0);
30970+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
30971 unsigned i;
30972- unsigned pool_offset = atomic_add_return(1, &start_pool);
30973+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
30974 struct ttm_page_pool *pool;
30975 int shrink_pages = sc->nr_to_scan;
30976
30977diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
30978index 88edacc..1e5412b 100644
30979--- a/drivers/gpu/drm/via/via_drv.h
30980+++ b/drivers/gpu/drm/via/via_drv.h
30981@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
30982 typedef uint32_t maskarray_t[5];
30983
30984 typedef struct drm_via_irq {
30985- atomic_t irq_received;
30986+ atomic_unchecked_t irq_received;
30987 uint32_t pending_mask;
30988 uint32_t enable_mask;
30989 wait_queue_head_t irq_queue;
30990@@ -75,7 +75,7 @@ typedef struct drm_via_private {
30991 struct timeval last_vblank;
30992 int last_vblank_valid;
30993 unsigned usec_per_vblank;
30994- atomic_t vbl_received;
30995+ atomic_unchecked_t vbl_received;
30996 drm_via_state_t hc_state;
30997 char pci_buf[VIA_PCI_BUF_SIZE];
30998 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
30999diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
31000index d391f48..10c8ca3 100644
31001--- a/drivers/gpu/drm/via/via_irq.c
31002+++ b/drivers/gpu/drm/via/via_irq.c
31003@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
31004 if (crtc != 0)
31005 return 0;
31006
31007- return atomic_read(&dev_priv->vbl_received);
31008+ return atomic_read_unchecked(&dev_priv->vbl_received);
31009 }
31010
31011 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31012@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31013
31014 status = VIA_READ(VIA_REG_INTERRUPT);
31015 if (status & VIA_IRQ_VBLANK_PENDING) {
31016- atomic_inc(&dev_priv->vbl_received);
31017- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
31018+ atomic_inc_unchecked(&dev_priv->vbl_received);
31019+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
31020 do_gettimeofday(&cur_vblank);
31021 if (dev_priv->last_vblank_valid) {
31022 dev_priv->usec_per_vblank =
31023@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31024 dev_priv->last_vblank = cur_vblank;
31025 dev_priv->last_vblank_valid = 1;
31026 }
31027- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
31028+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
31029 DRM_DEBUG("US per vblank is: %u\n",
31030 dev_priv->usec_per_vblank);
31031 }
31032@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31033
31034 for (i = 0; i < dev_priv->num_irqs; ++i) {
31035 if (status & cur_irq->pending_mask) {
31036- atomic_inc(&cur_irq->irq_received);
31037+ atomic_inc_unchecked(&cur_irq->irq_received);
31038 DRM_WAKEUP(&cur_irq->irq_queue);
31039 handled = 1;
31040 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
31041@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
31042 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31043 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
31044 masks[irq][4]));
31045- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
31046+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
31047 } else {
31048 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31049 (((cur_irq_sequence =
31050- atomic_read(&cur_irq->irq_received)) -
31051+ atomic_read_unchecked(&cur_irq->irq_received)) -
31052 *sequence) <= (1 << 23)));
31053 }
31054 *sequence = cur_irq_sequence;
31055@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
31056 }
31057
31058 for (i = 0; i < dev_priv->num_irqs; ++i) {
31059- atomic_set(&cur_irq->irq_received, 0);
31060+ atomic_set_unchecked(&cur_irq->irq_received, 0);
31061 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
31062 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
31063 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
31064@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
31065 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
31066 case VIA_IRQ_RELATIVE:
31067 irqwait->request.sequence +=
31068- atomic_read(&cur_irq->irq_received);
31069+ atomic_read_unchecked(&cur_irq->irq_received);
31070 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
31071 case VIA_IRQ_ABSOLUTE:
31072 break;
31073diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31074index d0f2c07..9ebd9c3 100644
31075--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31076+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31077@@ -263,7 +263,7 @@ struct vmw_private {
31078 * Fencing and IRQs.
31079 */
31080
31081- atomic_t marker_seq;
31082+ atomic_unchecked_t marker_seq;
31083 wait_queue_head_t fence_queue;
31084 wait_queue_head_t fifo_queue;
31085 int fence_queue_waiters; /* Protected by hw_mutex */
31086diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31087index a0c2f12..68ae6cb 100644
31088--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31089+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31090@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
31091 (unsigned int) min,
31092 (unsigned int) fifo->capabilities);
31093
31094- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31095+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31096 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
31097 vmw_marker_queue_init(&fifo->marker_queue);
31098 return vmw_fifo_send_fence(dev_priv, &dummy);
31099@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
31100 if (reserveable)
31101 iowrite32(bytes, fifo_mem +
31102 SVGA_FIFO_RESERVED);
31103- return fifo_mem + (next_cmd >> 2);
31104+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
31105 } else {
31106 need_bounce = true;
31107 }
31108@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31109
31110 fm = vmw_fifo_reserve(dev_priv, bytes);
31111 if (unlikely(fm == NULL)) {
31112- *seqno = atomic_read(&dev_priv->marker_seq);
31113+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31114 ret = -ENOMEM;
31115 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
31116 false, 3*HZ);
31117@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31118 }
31119
31120 do {
31121- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
31122+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
31123 } while (*seqno == 0);
31124
31125 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
31126diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31127index cabc95f..14b3d77 100644
31128--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31129+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31130@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
31131 * emitted. Then the fence is stale and signaled.
31132 */
31133
31134- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
31135+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
31136 > VMW_FENCE_WRAP);
31137
31138 return ret;
31139@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
31140
31141 if (fifo_idle)
31142 down_read(&fifo_state->rwsem);
31143- signal_seq = atomic_read(&dev_priv->marker_seq);
31144+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
31145 ret = 0;
31146
31147 for (;;) {
31148diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31149index 8a8725c..afed796 100644
31150--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31151+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31152@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
31153 while (!vmw_lag_lt(queue, us)) {
31154 spin_lock(&queue->lock);
31155 if (list_empty(&queue->head))
31156- seqno = atomic_read(&dev_priv->marker_seq);
31157+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31158 else {
31159 marker = list_first_entry(&queue->head,
31160 struct vmw_marker, head);
31161diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
31162index 4da66b4..e948655 100644
31163--- a/drivers/hid/hid-core.c
31164+++ b/drivers/hid/hid-core.c
31165@@ -2063,7 +2063,7 @@ static bool hid_ignore(struct hid_device *hdev)
31166
31167 int hid_add_device(struct hid_device *hdev)
31168 {
31169- static atomic_t id = ATOMIC_INIT(0);
31170+ static atomic_unchecked_t id = ATOMIC_INIT(0);
31171 int ret;
31172
31173 if (WARN_ON(hdev->status & HID_STAT_ADDED))
31174@@ -2078,7 +2078,7 @@ int hid_add_device(struct hid_device *hdev)
31175 /* XXX hack, any other cleaner solution after the driver core
31176 * is converted to allow more than 20 bytes as the device name? */
31177 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
31178- hdev->vendor, hdev->product, atomic_inc_return(&id));
31179+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
31180
31181 hid_debug_register(hdev, dev_name(&hdev->dev));
31182 ret = device_add(&hdev->dev);
31183diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
31184index eec3291..8ed706b 100644
31185--- a/drivers/hid/hid-wiimote-debug.c
31186+++ b/drivers/hid/hid-wiimote-debug.c
31187@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
31188 else if (size == 0)
31189 return -EIO;
31190
31191- if (copy_to_user(u, buf, size))
31192+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
31193 return -EFAULT;
31194
31195 *off += size;
31196diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
31197index b1ec0e2..c295a61 100644
31198--- a/drivers/hid/usbhid/hiddev.c
31199+++ b/drivers/hid/usbhid/hiddev.c
31200@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31201 break;
31202
31203 case HIDIOCAPPLICATION:
31204- if (arg < 0 || arg >= hid->maxapplication)
31205+ if (arg >= hid->maxapplication)
31206 break;
31207
31208 for (i = 0; i < hid->maxcollection; i++)
31209diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
31210index 4065374..10ed7dc 100644
31211--- a/drivers/hv/channel.c
31212+++ b/drivers/hv/channel.c
31213@@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
31214 int ret = 0;
31215 int t;
31216
31217- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31218- atomic_inc(&vmbus_connection.next_gpadl_handle);
31219+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31220+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31221
31222 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31223 if (ret)
31224diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
31225index 15956bd..ea34398 100644
31226--- a/drivers/hv/hv.c
31227+++ b/drivers/hv/hv.c
31228@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
31229 u64 output_address = (output) ? virt_to_phys(output) : 0;
31230 u32 output_address_hi = output_address >> 32;
31231 u32 output_address_lo = output_address & 0xFFFFFFFF;
31232- void *hypercall_page = hv_context.hypercall_page;
31233+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
31234
31235 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
31236 "=a"(hv_status_lo) : "d" (control_hi),
31237diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
31238index 699f0d8..f4f19250 100644
31239--- a/drivers/hv/hyperv_vmbus.h
31240+++ b/drivers/hv/hyperv_vmbus.h
31241@@ -555,7 +555,7 @@ enum vmbus_connect_state {
31242 struct vmbus_connection {
31243 enum vmbus_connect_state conn_state;
31244
31245- atomic_t next_gpadl_handle;
31246+ atomic_unchecked_t next_gpadl_handle;
31247
31248 /*
31249 * Represents channel interrupts. Each bit position represents a
31250diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
31251index a220e57..428f54d 100644
31252--- a/drivers/hv/vmbus_drv.c
31253+++ b/drivers/hv/vmbus_drv.c
31254@@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
31255 {
31256 int ret = 0;
31257
31258- static atomic_t device_num = ATOMIC_INIT(0);
31259+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
31260
31261 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
31262- atomic_inc_return(&device_num));
31263+ atomic_inc_return_unchecked(&device_num));
31264
31265 child_device_obj->device.bus = &hv_bus;
31266 child_device_obj->device.parent = &hv_acpi_dev->dev;
31267diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
31268index 9140236..ceaef4e 100644
31269--- a/drivers/hwmon/acpi_power_meter.c
31270+++ b/drivers/hwmon/acpi_power_meter.c
31271@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
31272 return res;
31273
31274 temp /= 1000;
31275- if (temp < 0)
31276- return -EINVAL;
31277
31278 mutex_lock(&resource->lock);
31279 resource->trip[attr->index - 7] = temp;
31280diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
31281index 8b011d0..3de24a1 100644
31282--- a/drivers/hwmon/sht15.c
31283+++ b/drivers/hwmon/sht15.c
31284@@ -166,7 +166,7 @@ struct sht15_data {
31285 int supply_uV;
31286 bool supply_uV_valid;
31287 struct work_struct update_supply_work;
31288- atomic_t interrupt_handled;
31289+ atomic_unchecked_t interrupt_handled;
31290 };
31291
31292 /**
31293@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
31294 return ret;
31295
31296 gpio_direction_input(data->pdata->gpio_data);
31297- atomic_set(&data->interrupt_handled, 0);
31298+ atomic_set_unchecked(&data->interrupt_handled, 0);
31299
31300 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31301 if (gpio_get_value(data->pdata->gpio_data) == 0) {
31302 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
31303 /* Only relevant if the interrupt hasn't occurred. */
31304- if (!atomic_read(&data->interrupt_handled))
31305+ if (!atomic_read_unchecked(&data->interrupt_handled))
31306 schedule_work(&data->read_work);
31307 }
31308 ret = wait_event_timeout(data->wait_queue,
31309@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
31310
31311 /* First disable the interrupt */
31312 disable_irq_nosync(irq);
31313- atomic_inc(&data->interrupt_handled);
31314+ atomic_inc_unchecked(&data->interrupt_handled);
31315 /* Then schedule a reading work struct */
31316 if (data->state != SHT15_READING_NOTHING)
31317 schedule_work(&data->read_work);
31318@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
31319 * If not, then start the interrupt again - care here as could
31320 * have gone low in meantime so verify it hasn't!
31321 */
31322- atomic_set(&data->interrupt_handled, 0);
31323+ atomic_set_unchecked(&data->interrupt_handled, 0);
31324 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31325 /* If still not occurred or another handler was scheduled */
31326 if (gpio_get_value(data->pdata->gpio_data)
31327- || atomic_read(&data->interrupt_handled))
31328+ || atomic_read_unchecked(&data->interrupt_handled))
31329 return;
31330 }
31331
31332diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
31333index 378fcb5..5e91fa8 100644
31334--- a/drivers/i2c/busses/i2c-amd756-s4882.c
31335+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
31336@@ -43,7 +43,7 @@
31337 extern struct i2c_adapter amd756_smbus;
31338
31339 static struct i2c_adapter *s4882_adapter;
31340-static struct i2c_algorithm *s4882_algo;
31341+static i2c_algorithm_no_const *s4882_algo;
31342
31343 /* Wrapper access functions for multiplexed SMBus */
31344 static DEFINE_MUTEX(amd756_lock);
31345diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
31346index 29015eb..af2d8e9 100644
31347--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
31348+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
31349@@ -41,7 +41,7 @@
31350 extern struct i2c_adapter *nforce2_smbus;
31351
31352 static struct i2c_adapter *s4985_adapter;
31353-static struct i2c_algorithm *s4985_algo;
31354+static i2c_algorithm_no_const *s4985_algo;
31355
31356 /* Wrapper access functions for multiplexed SMBus */
31357 static DEFINE_MUTEX(nforce2_lock);
31358diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
31359index d7a4833..7fae376 100644
31360--- a/drivers/i2c/i2c-mux.c
31361+++ b/drivers/i2c/i2c-mux.c
31362@@ -28,7 +28,7 @@
31363 /* multiplexer per channel data */
31364 struct i2c_mux_priv {
31365 struct i2c_adapter adap;
31366- struct i2c_algorithm algo;
31367+ i2c_algorithm_no_const algo;
31368
31369 struct i2c_adapter *parent;
31370 void *mux_dev; /* the mux chip/device */
31371diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
31372index 57d00ca..0145194 100644
31373--- a/drivers/ide/aec62xx.c
31374+++ b/drivers/ide/aec62xx.c
31375@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
31376 .cable_detect = atp86x_cable_detect,
31377 };
31378
31379-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
31380+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
31381 { /* 0: AEC6210 */
31382 .name = DRV_NAME,
31383 .init_chipset = init_chipset_aec62xx,
31384diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
31385index 2c8016a..911a27c 100644
31386--- a/drivers/ide/alim15x3.c
31387+++ b/drivers/ide/alim15x3.c
31388@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
31389 .dma_sff_read_status = ide_dma_sff_read_status,
31390 };
31391
31392-static const struct ide_port_info ali15x3_chipset __devinitdata = {
31393+static const struct ide_port_info ali15x3_chipset __devinitconst = {
31394 .name = DRV_NAME,
31395 .init_chipset = init_chipset_ali15x3,
31396 .init_hwif = init_hwif_ali15x3,
31397diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
31398index 3747b25..56fc995 100644
31399--- a/drivers/ide/amd74xx.c
31400+++ b/drivers/ide/amd74xx.c
31401@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
31402 .udma_mask = udma, \
31403 }
31404
31405-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
31406+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
31407 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
31408 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
31409 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
31410diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
31411index 15f0ead..cb43480 100644
31412--- a/drivers/ide/atiixp.c
31413+++ b/drivers/ide/atiixp.c
31414@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
31415 .cable_detect = atiixp_cable_detect,
31416 };
31417
31418-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
31419+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
31420 { /* 0: IXP200/300/400/700 */
31421 .name = DRV_NAME,
31422 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
31423diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
31424index 5f80312..d1fc438 100644
31425--- a/drivers/ide/cmd64x.c
31426+++ b/drivers/ide/cmd64x.c
31427@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
31428 .dma_sff_read_status = ide_dma_sff_read_status,
31429 };
31430
31431-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
31432+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
31433 { /* 0: CMD643 */
31434 .name = DRV_NAME,
31435 .init_chipset = init_chipset_cmd64x,
31436diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
31437index 2c1e5f7..1444762 100644
31438--- a/drivers/ide/cs5520.c
31439+++ b/drivers/ide/cs5520.c
31440@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
31441 .set_dma_mode = cs5520_set_dma_mode,
31442 };
31443
31444-static const struct ide_port_info cyrix_chipset __devinitdata = {
31445+static const struct ide_port_info cyrix_chipset __devinitconst = {
31446 .name = DRV_NAME,
31447 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
31448 .port_ops = &cs5520_port_ops,
31449diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
31450index 4dc4eb9..49b40ad 100644
31451--- a/drivers/ide/cs5530.c
31452+++ b/drivers/ide/cs5530.c
31453@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
31454 .udma_filter = cs5530_udma_filter,
31455 };
31456
31457-static const struct ide_port_info cs5530_chipset __devinitdata = {
31458+static const struct ide_port_info cs5530_chipset __devinitconst = {
31459 .name = DRV_NAME,
31460 .init_chipset = init_chipset_cs5530,
31461 .init_hwif = init_hwif_cs5530,
31462diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
31463index 5059faf..18d4c85 100644
31464--- a/drivers/ide/cs5535.c
31465+++ b/drivers/ide/cs5535.c
31466@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
31467 .cable_detect = cs5535_cable_detect,
31468 };
31469
31470-static const struct ide_port_info cs5535_chipset __devinitdata = {
31471+static const struct ide_port_info cs5535_chipset __devinitconst = {
31472 .name = DRV_NAME,
31473 .port_ops = &cs5535_port_ops,
31474 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
31475diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
31476index 847553f..3ffb49d 100644
31477--- a/drivers/ide/cy82c693.c
31478+++ b/drivers/ide/cy82c693.c
31479@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
31480 .set_dma_mode = cy82c693_set_dma_mode,
31481 };
31482
31483-static const struct ide_port_info cy82c693_chipset __devinitdata = {
31484+static const struct ide_port_info cy82c693_chipset __devinitconst = {
31485 .name = DRV_NAME,
31486 .init_iops = init_iops_cy82c693,
31487 .port_ops = &cy82c693_port_ops,
31488diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
31489index 58c51cd..4aec3b8 100644
31490--- a/drivers/ide/hpt366.c
31491+++ b/drivers/ide/hpt366.c
31492@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
31493 }
31494 };
31495
31496-static const struct hpt_info hpt36x __devinitdata = {
31497+static const struct hpt_info hpt36x __devinitconst = {
31498 .chip_name = "HPT36x",
31499 .chip_type = HPT36x,
31500 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
31501@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
31502 .timings = &hpt36x_timings
31503 };
31504
31505-static const struct hpt_info hpt370 __devinitdata = {
31506+static const struct hpt_info hpt370 __devinitconst = {
31507 .chip_name = "HPT370",
31508 .chip_type = HPT370,
31509 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31510@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
31511 .timings = &hpt37x_timings
31512 };
31513
31514-static const struct hpt_info hpt370a __devinitdata = {
31515+static const struct hpt_info hpt370a __devinitconst = {
31516 .chip_name = "HPT370A",
31517 .chip_type = HPT370A,
31518 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31519@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
31520 .timings = &hpt37x_timings
31521 };
31522
31523-static const struct hpt_info hpt374 __devinitdata = {
31524+static const struct hpt_info hpt374 __devinitconst = {
31525 .chip_name = "HPT374",
31526 .chip_type = HPT374,
31527 .udma_mask = ATA_UDMA5,
31528@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
31529 .timings = &hpt37x_timings
31530 };
31531
31532-static const struct hpt_info hpt372 __devinitdata = {
31533+static const struct hpt_info hpt372 __devinitconst = {
31534 .chip_name = "HPT372",
31535 .chip_type = HPT372,
31536 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31537@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
31538 .timings = &hpt37x_timings
31539 };
31540
31541-static const struct hpt_info hpt372a __devinitdata = {
31542+static const struct hpt_info hpt372a __devinitconst = {
31543 .chip_name = "HPT372A",
31544 .chip_type = HPT372A,
31545 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31546@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
31547 .timings = &hpt37x_timings
31548 };
31549
31550-static const struct hpt_info hpt302 __devinitdata = {
31551+static const struct hpt_info hpt302 __devinitconst = {
31552 .chip_name = "HPT302",
31553 .chip_type = HPT302,
31554 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31555@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
31556 .timings = &hpt37x_timings
31557 };
31558
31559-static const struct hpt_info hpt371 __devinitdata = {
31560+static const struct hpt_info hpt371 __devinitconst = {
31561 .chip_name = "HPT371",
31562 .chip_type = HPT371,
31563 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31564@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
31565 .timings = &hpt37x_timings
31566 };
31567
31568-static const struct hpt_info hpt372n __devinitdata = {
31569+static const struct hpt_info hpt372n __devinitconst = {
31570 .chip_name = "HPT372N",
31571 .chip_type = HPT372N,
31572 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31573@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
31574 .timings = &hpt37x_timings
31575 };
31576
31577-static const struct hpt_info hpt302n __devinitdata = {
31578+static const struct hpt_info hpt302n __devinitconst = {
31579 .chip_name = "HPT302N",
31580 .chip_type = HPT302N,
31581 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31582@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
31583 .timings = &hpt37x_timings
31584 };
31585
31586-static const struct hpt_info hpt371n __devinitdata = {
31587+static const struct hpt_info hpt371n __devinitconst = {
31588 .chip_name = "HPT371N",
31589 .chip_type = HPT371N,
31590 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31591@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
31592 .dma_sff_read_status = ide_dma_sff_read_status,
31593 };
31594
31595-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
31596+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
31597 { /* 0: HPT36x */
31598 .name = DRV_NAME,
31599 .init_chipset = init_chipset_hpt366,
31600diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
31601index 8126824..55a2798 100644
31602--- a/drivers/ide/ide-cd.c
31603+++ b/drivers/ide/ide-cd.c
31604@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
31605 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
31606 if ((unsigned long)buf & alignment
31607 || blk_rq_bytes(rq) & q->dma_pad_mask
31608- || object_is_on_stack(buf))
31609+ || object_starts_on_stack(buf))
31610 drive->dma = 0;
31611 }
31612 }
31613diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
31614index 7f56b73..dab5b67 100644
31615--- a/drivers/ide/ide-pci-generic.c
31616+++ b/drivers/ide/ide-pci-generic.c
31617@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
31618 .udma_mask = ATA_UDMA6, \
31619 }
31620
31621-static const struct ide_port_info generic_chipsets[] __devinitdata = {
31622+static const struct ide_port_info generic_chipsets[] __devinitconst = {
31623 /* 0: Unknown */
31624 DECLARE_GENERIC_PCI_DEV(0),
31625
31626diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
31627index 560e66d..d5dd180 100644
31628--- a/drivers/ide/it8172.c
31629+++ b/drivers/ide/it8172.c
31630@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
31631 .set_dma_mode = it8172_set_dma_mode,
31632 };
31633
31634-static const struct ide_port_info it8172_port_info __devinitdata = {
31635+static const struct ide_port_info it8172_port_info __devinitconst = {
31636 .name = DRV_NAME,
31637 .port_ops = &it8172_port_ops,
31638 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
31639diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
31640index 46816ba..1847aeb 100644
31641--- a/drivers/ide/it8213.c
31642+++ b/drivers/ide/it8213.c
31643@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
31644 .cable_detect = it8213_cable_detect,
31645 };
31646
31647-static const struct ide_port_info it8213_chipset __devinitdata = {
31648+static const struct ide_port_info it8213_chipset __devinitconst = {
31649 .name = DRV_NAME,
31650 .enablebits = { {0x41, 0x80, 0x80} },
31651 .port_ops = &it8213_port_ops,
31652diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
31653index 2e3169f..c5611db 100644
31654--- a/drivers/ide/it821x.c
31655+++ b/drivers/ide/it821x.c
31656@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
31657 .cable_detect = it821x_cable_detect,
31658 };
31659
31660-static const struct ide_port_info it821x_chipset __devinitdata = {
31661+static const struct ide_port_info it821x_chipset __devinitconst = {
31662 .name = DRV_NAME,
31663 .init_chipset = init_chipset_it821x,
31664 .init_hwif = init_hwif_it821x,
31665diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
31666index 74c2c4a..efddd7d 100644
31667--- a/drivers/ide/jmicron.c
31668+++ b/drivers/ide/jmicron.c
31669@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
31670 .cable_detect = jmicron_cable_detect,
31671 };
31672
31673-static const struct ide_port_info jmicron_chipset __devinitdata = {
31674+static const struct ide_port_info jmicron_chipset __devinitconst = {
31675 .name = DRV_NAME,
31676 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
31677 .port_ops = &jmicron_port_ops,
31678diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
31679index 95327a2..73f78d8 100644
31680--- a/drivers/ide/ns87415.c
31681+++ b/drivers/ide/ns87415.c
31682@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
31683 .dma_sff_read_status = superio_dma_sff_read_status,
31684 };
31685
31686-static const struct ide_port_info ns87415_chipset __devinitdata = {
31687+static const struct ide_port_info ns87415_chipset __devinitconst = {
31688 .name = DRV_NAME,
31689 .init_hwif = init_hwif_ns87415,
31690 .tp_ops = &ns87415_tp_ops,
31691diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
31692index 1a53a4c..39edc66 100644
31693--- a/drivers/ide/opti621.c
31694+++ b/drivers/ide/opti621.c
31695@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
31696 .set_pio_mode = opti621_set_pio_mode,
31697 };
31698
31699-static const struct ide_port_info opti621_chipset __devinitdata = {
31700+static const struct ide_port_info opti621_chipset __devinitconst = {
31701 .name = DRV_NAME,
31702 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
31703 .port_ops = &opti621_port_ops,
31704diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
31705index 9546fe2..2e5ceb6 100644
31706--- a/drivers/ide/pdc202xx_new.c
31707+++ b/drivers/ide/pdc202xx_new.c
31708@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
31709 .udma_mask = udma, \
31710 }
31711
31712-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
31713+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
31714 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
31715 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
31716 };
31717diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
31718index 3a35ec6..5634510 100644
31719--- a/drivers/ide/pdc202xx_old.c
31720+++ b/drivers/ide/pdc202xx_old.c
31721@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
31722 .max_sectors = sectors, \
31723 }
31724
31725-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
31726+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
31727 { /* 0: PDC20246 */
31728 .name = DRV_NAME,
31729 .init_chipset = init_chipset_pdc202xx,
31730diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
31731index 1892e81..fe0fd60 100644
31732--- a/drivers/ide/piix.c
31733+++ b/drivers/ide/piix.c
31734@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
31735 .udma_mask = udma, \
31736 }
31737
31738-static const struct ide_port_info piix_pci_info[] __devinitdata = {
31739+static const struct ide_port_info piix_pci_info[] __devinitconst = {
31740 /* 0: MPIIX */
31741 { /*
31742 * MPIIX actually has only a single IDE channel mapped to
31743diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
31744index a6414a8..c04173e 100644
31745--- a/drivers/ide/rz1000.c
31746+++ b/drivers/ide/rz1000.c
31747@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
31748 }
31749 }
31750
31751-static const struct ide_port_info rz1000_chipset __devinitdata = {
31752+static const struct ide_port_info rz1000_chipset __devinitconst = {
31753 .name = DRV_NAME,
31754 .host_flags = IDE_HFLAG_NO_DMA,
31755 };
31756diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
31757index 356b9b5..d4758eb 100644
31758--- a/drivers/ide/sc1200.c
31759+++ b/drivers/ide/sc1200.c
31760@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
31761 .dma_sff_read_status = ide_dma_sff_read_status,
31762 };
31763
31764-static const struct ide_port_info sc1200_chipset __devinitdata = {
31765+static const struct ide_port_info sc1200_chipset __devinitconst = {
31766 .name = DRV_NAME,
31767 .port_ops = &sc1200_port_ops,
31768 .dma_ops = &sc1200_dma_ops,
31769diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
31770index b7f5b0c..9701038 100644
31771--- a/drivers/ide/scc_pata.c
31772+++ b/drivers/ide/scc_pata.c
31773@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
31774 .dma_sff_read_status = scc_dma_sff_read_status,
31775 };
31776
31777-static const struct ide_port_info scc_chipset __devinitdata = {
31778+static const struct ide_port_info scc_chipset __devinitconst = {
31779 .name = "sccIDE",
31780 .init_iops = init_iops_scc,
31781 .init_dma = scc_init_dma,
31782diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
31783index 35fb8da..24d72ef 100644
31784--- a/drivers/ide/serverworks.c
31785+++ b/drivers/ide/serverworks.c
31786@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
31787 .cable_detect = svwks_cable_detect,
31788 };
31789
31790-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
31791+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
31792 { /* 0: OSB4 */
31793 .name = DRV_NAME,
31794 .init_chipset = init_chipset_svwks,
31795diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
31796index ddeda44..46f7e30 100644
31797--- a/drivers/ide/siimage.c
31798+++ b/drivers/ide/siimage.c
31799@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
31800 .udma_mask = ATA_UDMA6, \
31801 }
31802
31803-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
31804+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
31805 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
31806 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
31807 };
31808diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
31809index 4a00225..09e61b4 100644
31810--- a/drivers/ide/sis5513.c
31811+++ b/drivers/ide/sis5513.c
31812@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
31813 .cable_detect = sis_cable_detect,
31814 };
31815
31816-static const struct ide_port_info sis5513_chipset __devinitdata = {
31817+static const struct ide_port_info sis5513_chipset __devinitconst = {
31818 .name = DRV_NAME,
31819 .init_chipset = init_chipset_sis5513,
31820 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
31821diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
31822index f21dc2a..d051cd2 100644
31823--- a/drivers/ide/sl82c105.c
31824+++ b/drivers/ide/sl82c105.c
31825@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
31826 .dma_sff_read_status = ide_dma_sff_read_status,
31827 };
31828
31829-static const struct ide_port_info sl82c105_chipset __devinitdata = {
31830+static const struct ide_port_info sl82c105_chipset __devinitconst = {
31831 .name = DRV_NAME,
31832 .init_chipset = init_chipset_sl82c105,
31833 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
31834diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
31835index 864ffe0..863a5e9 100644
31836--- a/drivers/ide/slc90e66.c
31837+++ b/drivers/ide/slc90e66.c
31838@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
31839 .cable_detect = slc90e66_cable_detect,
31840 };
31841
31842-static const struct ide_port_info slc90e66_chipset __devinitdata = {
31843+static const struct ide_port_info slc90e66_chipset __devinitconst = {
31844 .name = DRV_NAME,
31845 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
31846 .port_ops = &slc90e66_port_ops,
31847diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
31848index 4799d5c..1794678 100644
31849--- a/drivers/ide/tc86c001.c
31850+++ b/drivers/ide/tc86c001.c
31851@@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
31852 .dma_sff_read_status = ide_dma_sff_read_status,
31853 };
31854
31855-static const struct ide_port_info tc86c001_chipset __devinitdata = {
31856+static const struct ide_port_info tc86c001_chipset __devinitconst = {
31857 .name = DRV_NAME,
31858 .init_hwif = init_hwif_tc86c001,
31859 .port_ops = &tc86c001_port_ops,
31860diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
31861index 281c914..55ce1b8 100644
31862--- a/drivers/ide/triflex.c
31863+++ b/drivers/ide/triflex.c
31864@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
31865 .set_dma_mode = triflex_set_mode,
31866 };
31867
31868-static const struct ide_port_info triflex_device __devinitdata = {
31869+static const struct ide_port_info triflex_device __devinitconst = {
31870 .name = DRV_NAME,
31871 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
31872 .port_ops = &triflex_port_ops,
31873diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
31874index 4b42ca0..e494a98 100644
31875--- a/drivers/ide/trm290.c
31876+++ b/drivers/ide/trm290.c
31877@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
31878 .dma_check = trm290_dma_check,
31879 };
31880
31881-static const struct ide_port_info trm290_chipset __devinitdata = {
31882+static const struct ide_port_info trm290_chipset __devinitconst = {
31883 .name = DRV_NAME,
31884 .init_hwif = init_hwif_trm290,
31885 .tp_ops = &trm290_tp_ops,
31886diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
31887index f46f49c..eb77678 100644
31888--- a/drivers/ide/via82cxxx.c
31889+++ b/drivers/ide/via82cxxx.c
31890@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
31891 .cable_detect = via82cxxx_cable_detect,
31892 };
31893
31894-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
31895+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
31896 .name = DRV_NAME,
31897 .init_chipset = init_chipset_via82cxxx,
31898 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
31899diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
31900index 73d4531..c90cd2d 100644
31901--- a/drivers/ieee802154/fakehard.c
31902+++ b/drivers/ieee802154/fakehard.c
31903@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
31904 phy->transmit_power = 0xbf;
31905
31906 dev->netdev_ops = &fake_ops;
31907- dev->ml_priv = &fake_mlme;
31908+ dev->ml_priv = (void *)&fake_mlme;
31909
31910 priv = netdev_priv(dev);
31911 priv->phy = phy;
31912diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
31913index c889aae..6cf5aa7 100644
31914--- a/drivers/infiniband/core/cm.c
31915+++ b/drivers/infiniband/core/cm.c
31916@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
31917
31918 struct cm_counter_group {
31919 struct kobject obj;
31920- atomic_long_t counter[CM_ATTR_COUNT];
31921+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
31922 };
31923
31924 struct cm_counter_attribute {
31925@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
31926 struct ib_mad_send_buf *msg = NULL;
31927 int ret;
31928
31929- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31930+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31931 counter[CM_REQ_COUNTER]);
31932
31933 /* Quick state check to discard duplicate REQs. */
31934@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
31935 if (!cm_id_priv)
31936 return;
31937
31938- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31939+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31940 counter[CM_REP_COUNTER]);
31941 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
31942 if (ret)
31943@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
31944 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
31945 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
31946 spin_unlock_irq(&cm_id_priv->lock);
31947- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31948+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31949 counter[CM_RTU_COUNTER]);
31950 goto out;
31951 }
31952@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
31953 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
31954 dreq_msg->local_comm_id);
31955 if (!cm_id_priv) {
31956- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31957+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31958 counter[CM_DREQ_COUNTER]);
31959 cm_issue_drep(work->port, work->mad_recv_wc);
31960 return -EINVAL;
31961@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
31962 case IB_CM_MRA_REP_RCVD:
31963 break;
31964 case IB_CM_TIMEWAIT:
31965- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31966+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31967 counter[CM_DREQ_COUNTER]);
31968 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31969 goto unlock;
31970@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
31971 cm_free_msg(msg);
31972 goto deref;
31973 case IB_CM_DREQ_RCVD:
31974- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31975+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31976 counter[CM_DREQ_COUNTER]);
31977 goto unlock;
31978 default:
31979@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
31980 ib_modify_mad(cm_id_priv->av.port->mad_agent,
31981 cm_id_priv->msg, timeout)) {
31982 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
31983- atomic_long_inc(&work->port->
31984+ atomic_long_inc_unchecked(&work->port->
31985 counter_group[CM_RECV_DUPLICATES].
31986 counter[CM_MRA_COUNTER]);
31987 goto out;
31988@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
31989 break;
31990 case IB_CM_MRA_REQ_RCVD:
31991 case IB_CM_MRA_REP_RCVD:
31992- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31993+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31994 counter[CM_MRA_COUNTER]);
31995 /* fall through */
31996 default:
31997@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
31998 case IB_CM_LAP_IDLE:
31999 break;
32000 case IB_CM_MRA_LAP_SENT:
32001- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32002+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32003 counter[CM_LAP_COUNTER]);
32004 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32005 goto unlock;
32006@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
32007 cm_free_msg(msg);
32008 goto deref;
32009 case IB_CM_LAP_RCVD:
32010- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32011+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32012 counter[CM_LAP_COUNTER]);
32013 goto unlock;
32014 default:
32015@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
32016 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
32017 if (cur_cm_id_priv) {
32018 spin_unlock_irq(&cm.lock);
32019- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32020+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32021 counter[CM_SIDR_REQ_COUNTER]);
32022 goto out; /* Duplicate message. */
32023 }
32024@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
32025 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
32026 msg->retries = 1;
32027
32028- atomic_long_add(1 + msg->retries,
32029+ atomic_long_add_unchecked(1 + msg->retries,
32030 &port->counter_group[CM_XMIT].counter[attr_index]);
32031 if (msg->retries)
32032- atomic_long_add(msg->retries,
32033+ atomic_long_add_unchecked(msg->retries,
32034 &port->counter_group[CM_XMIT_RETRIES].
32035 counter[attr_index]);
32036
32037@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
32038 }
32039
32040 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
32041- atomic_long_inc(&port->counter_group[CM_RECV].
32042+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
32043 counter[attr_id - CM_ATTR_ID_OFFSET]);
32044
32045 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
32046@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
32047 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
32048
32049 return sprintf(buf, "%ld\n",
32050- atomic_long_read(&group->counter[cm_attr->index]));
32051+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
32052 }
32053
32054 static const struct sysfs_ops cm_counter_ops = {
32055diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
32056index 176c8f9..2627b62 100644
32057--- a/drivers/infiniband/core/fmr_pool.c
32058+++ b/drivers/infiniband/core/fmr_pool.c
32059@@ -98,8 +98,8 @@ struct ib_fmr_pool {
32060
32061 struct task_struct *thread;
32062
32063- atomic_t req_ser;
32064- atomic_t flush_ser;
32065+ atomic_unchecked_t req_ser;
32066+ atomic_unchecked_t flush_ser;
32067
32068 wait_queue_head_t force_wait;
32069 };
32070@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32071 struct ib_fmr_pool *pool = pool_ptr;
32072
32073 do {
32074- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
32075+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
32076 ib_fmr_batch_release(pool);
32077
32078- atomic_inc(&pool->flush_ser);
32079+ atomic_inc_unchecked(&pool->flush_ser);
32080 wake_up_interruptible(&pool->force_wait);
32081
32082 if (pool->flush_function)
32083@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32084 }
32085
32086 set_current_state(TASK_INTERRUPTIBLE);
32087- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
32088+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
32089 !kthread_should_stop())
32090 schedule();
32091 __set_current_state(TASK_RUNNING);
32092@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
32093 pool->dirty_watermark = params->dirty_watermark;
32094 pool->dirty_len = 0;
32095 spin_lock_init(&pool->pool_lock);
32096- atomic_set(&pool->req_ser, 0);
32097- atomic_set(&pool->flush_ser, 0);
32098+ atomic_set_unchecked(&pool->req_ser, 0);
32099+ atomic_set_unchecked(&pool->flush_ser, 0);
32100 init_waitqueue_head(&pool->force_wait);
32101
32102 pool->thread = kthread_run(ib_fmr_cleanup_thread,
32103@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
32104 }
32105 spin_unlock_irq(&pool->pool_lock);
32106
32107- serial = atomic_inc_return(&pool->req_ser);
32108+ serial = atomic_inc_return_unchecked(&pool->req_ser);
32109 wake_up_process(pool->thread);
32110
32111 if (wait_event_interruptible(pool->force_wait,
32112- atomic_read(&pool->flush_ser) - serial >= 0))
32113+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
32114 return -EINTR;
32115
32116 return 0;
32117@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
32118 } else {
32119 list_add_tail(&fmr->list, &pool->dirty_list);
32120 if (++pool->dirty_len >= pool->dirty_watermark) {
32121- atomic_inc(&pool->req_ser);
32122+ atomic_inc_unchecked(&pool->req_ser);
32123 wake_up_process(pool->thread);
32124 }
32125 }
32126diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
32127index 40c8353..946b0e4 100644
32128--- a/drivers/infiniband/hw/cxgb4/mem.c
32129+++ b/drivers/infiniband/hw/cxgb4/mem.c
32130@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32131 int err;
32132 struct fw_ri_tpte tpt;
32133 u32 stag_idx;
32134- static atomic_t key;
32135+ static atomic_unchecked_t key;
32136
32137 if (c4iw_fatal_error(rdev))
32138 return -EIO;
32139@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32140 &rdev->resource.tpt_fifo_lock);
32141 if (!stag_idx)
32142 return -ENOMEM;
32143- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
32144+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
32145 }
32146 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
32147 __func__, stag_state, type, pdid, stag_idx);
32148diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
32149index 79b3dbc..96e5fcc 100644
32150--- a/drivers/infiniband/hw/ipath/ipath_rc.c
32151+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
32152@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32153 struct ib_atomic_eth *ateth;
32154 struct ipath_ack_entry *e;
32155 u64 vaddr;
32156- atomic64_t *maddr;
32157+ atomic64_unchecked_t *maddr;
32158 u64 sdata;
32159 u32 rkey;
32160 u8 next;
32161@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32162 IB_ACCESS_REMOTE_ATOMIC)))
32163 goto nack_acc_unlck;
32164 /* Perform atomic OP and save result. */
32165- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32166+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32167 sdata = be64_to_cpu(ateth->swap_data);
32168 e = &qp->s_ack_queue[qp->r_head_ack_queue];
32169 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
32170- (u64) atomic64_add_return(sdata, maddr) - sdata :
32171+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32172 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32173 be64_to_cpu(ateth->compare_data),
32174 sdata);
32175diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
32176index 1f95bba..9530f87 100644
32177--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
32178+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
32179@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
32180 unsigned long flags;
32181 struct ib_wc wc;
32182 u64 sdata;
32183- atomic64_t *maddr;
32184+ atomic64_unchecked_t *maddr;
32185 enum ib_wc_status send_status;
32186
32187 /*
32188@@ -382,11 +382,11 @@ again:
32189 IB_ACCESS_REMOTE_ATOMIC)))
32190 goto acc_err;
32191 /* Perform atomic OP and save result. */
32192- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32193+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32194 sdata = wqe->wr.wr.atomic.compare_add;
32195 *(u64 *) sqp->s_sge.sge.vaddr =
32196 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
32197- (u64) atomic64_add_return(sdata, maddr) - sdata :
32198+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32199 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32200 sdata, wqe->wr.wr.atomic.swap);
32201 goto send_comp;
32202diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
32203index 7140199..da60063 100644
32204--- a/drivers/infiniband/hw/nes/nes.c
32205+++ b/drivers/infiniband/hw/nes/nes.c
32206@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
32207 LIST_HEAD(nes_adapter_list);
32208 static LIST_HEAD(nes_dev_list);
32209
32210-atomic_t qps_destroyed;
32211+atomic_unchecked_t qps_destroyed;
32212
32213 static unsigned int ee_flsh_adapter;
32214 static unsigned int sysfs_nonidx_addr;
32215@@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
32216 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
32217 struct nes_adapter *nesadapter = nesdev->nesadapter;
32218
32219- atomic_inc(&qps_destroyed);
32220+ atomic_inc_unchecked(&qps_destroyed);
32221
32222 /* Free the control structures */
32223
32224diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
32225index c438e46..ca30356 100644
32226--- a/drivers/infiniband/hw/nes/nes.h
32227+++ b/drivers/infiniband/hw/nes/nes.h
32228@@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
32229 extern unsigned int wqm_quanta;
32230 extern struct list_head nes_adapter_list;
32231
32232-extern atomic_t cm_connects;
32233-extern atomic_t cm_accepts;
32234-extern atomic_t cm_disconnects;
32235-extern atomic_t cm_closes;
32236-extern atomic_t cm_connecteds;
32237-extern atomic_t cm_connect_reqs;
32238-extern atomic_t cm_rejects;
32239-extern atomic_t mod_qp_timouts;
32240-extern atomic_t qps_created;
32241-extern atomic_t qps_destroyed;
32242-extern atomic_t sw_qps_destroyed;
32243+extern atomic_unchecked_t cm_connects;
32244+extern atomic_unchecked_t cm_accepts;
32245+extern atomic_unchecked_t cm_disconnects;
32246+extern atomic_unchecked_t cm_closes;
32247+extern atomic_unchecked_t cm_connecteds;
32248+extern atomic_unchecked_t cm_connect_reqs;
32249+extern atomic_unchecked_t cm_rejects;
32250+extern atomic_unchecked_t mod_qp_timouts;
32251+extern atomic_unchecked_t qps_created;
32252+extern atomic_unchecked_t qps_destroyed;
32253+extern atomic_unchecked_t sw_qps_destroyed;
32254 extern u32 mh_detected;
32255 extern u32 mh_pauses_sent;
32256 extern u32 cm_packets_sent;
32257@@ -197,16 +197,16 @@ extern u32 cm_packets_created;
32258 extern u32 cm_packets_received;
32259 extern u32 cm_packets_dropped;
32260 extern u32 cm_packets_retrans;
32261-extern atomic_t cm_listens_created;
32262-extern atomic_t cm_listens_destroyed;
32263+extern atomic_unchecked_t cm_listens_created;
32264+extern atomic_unchecked_t cm_listens_destroyed;
32265 extern u32 cm_backlog_drops;
32266-extern atomic_t cm_loopbacks;
32267-extern atomic_t cm_nodes_created;
32268-extern atomic_t cm_nodes_destroyed;
32269-extern atomic_t cm_accel_dropped_pkts;
32270-extern atomic_t cm_resets_recvd;
32271-extern atomic_t pau_qps_created;
32272-extern atomic_t pau_qps_destroyed;
32273+extern atomic_unchecked_t cm_loopbacks;
32274+extern atomic_unchecked_t cm_nodes_created;
32275+extern atomic_unchecked_t cm_nodes_destroyed;
32276+extern atomic_unchecked_t cm_accel_dropped_pkts;
32277+extern atomic_unchecked_t cm_resets_recvd;
32278+extern atomic_unchecked_t pau_qps_created;
32279+extern atomic_unchecked_t pau_qps_destroyed;
32280
32281 extern u32 int_mod_timer_init;
32282 extern u32 int_mod_cq_depth_256;
32283diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
32284index 71edfbb..15b62ae 100644
32285--- a/drivers/infiniband/hw/nes/nes_cm.c
32286+++ b/drivers/infiniband/hw/nes/nes_cm.c
32287@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
32288 u32 cm_packets_retrans;
32289 u32 cm_packets_created;
32290 u32 cm_packets_received;
32291-atomic_t cm_listens_created;
32292-atomic_t cm_listens_destroyed;
32293+atomic_unchecked_t cm_listens_created;
32294+atomic_unchecked_t cm_listens_destroyed;
32295 u32 cm_backlog_drops;
32296-atomic_t cm_loopbacks;
32297-atomic_t cm_nodes_created;
32298-atomic_t cm_nodes_destroyed;
32299-atomic_t cm_accel_dropped_pkts;
32300-atomic_t cm_resets_recvd;
32301+atomic_unchecked_t cm_loopbacks;
32302+atomic_unchecked_t cm_nodes_created;
32303+atomic_unchecked_t cm_nodes_destroyed;
32304+atomic_unchecked_t cm_accel_dropped_pkts;
32305+atomic_unchecked_t cm_resets_recvd;
32306
32307 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
32308 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
32309@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
32310
32311 static struct nes_cm_core *g_cm_core;
32312
32313-atomic_t cm_connects;
32314-atomic_t cm_accepts;
32315-atomic_t cm_disconnects;
32316-atomic_t cm_closes;
32317-atomic_t cm_connecteds;
32318-atomic_t cm_connect_reqs;
32319-atomic_t cm_rejects;
32320+atomic_unchecked_t cm_connects;
32321+atomic_unchecked_t cm_accepts;
32322+atomic_unchecked_t cm_disconnects;
32323+atomic_unchecked_t cm_closes;
32324+atomic_unchecked_t cm_connecteds;
32325+atomic_unchecked_t cm_connect_reqs;
32326+atomic_unchecked_t cm_rejects;
32327
32328 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
32329 {
32330@@ -1279,7 +1279,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
32331 kfree(listener);
32332 listener = NULL;
32333 ret = 0;
32334- atomic_inc(&cm_listens_destroyed);
32335+ atomic_inc_unchecked(&cm_listens_destroyed);
32336 } else {
32337 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
32338 }
32339@@ -1482,7 +1482,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
32340 cm_node->rem_mac);
32341
32342 add_hte_node(cm_core, cm_node);
32343- atomic_inc(&cm_nodes_created);
32344+ atomic_inc_unchecked(&cm_nodes_created);
32345
32346 return cm_node;
32347 }
32348@@ -1540,7 +1540,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
32349 }
32350
32351 atomic_dec(&cm_core->node_cnt);
32352- atomic_inc(&cm_nodes_destroyed);
32353+ atomic_inc_unchecked(&cm_nodes_destroyed);
32354 nesqp = cm_node->nesqp;
32355 if (nesqp) {
32356 nesqp->cm_node = NULL;
32357@@ -1604,7 +1604,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
32358
32359 static void drop_packet(struct sk_buff *skb)
32360 {
32361- atomic_inc(&cm_accel_dropped_pkts);
32362+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
32363 dev_kfree_skb_any(skb);
32364 }
32365
32366@@ -1667,7 +1667,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
32367 {
32368
32369 int reset = 0; /* whether to send reset in case of err.. */
32370- atomic_inc(&cm_resets_recvd);
32371+ atomic_inc_unchecked(&cm_resets_recvd);
32372 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
32373 " refcnt=%d\n", cm_node, cm_node->state,
32374 atomic_read(&cm_node->ref_count));
32375@@ -2308,7 +2308,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
32376 rem_ref_cm_node(cm_node->cm_core, cm_node);
32377 return NULL;
32378 }
32379- atomic_inc(&cm_loopbacks);
32380+ atomic_inc_unchecked(&cm_loopbacks);
32381 loopbackremotenode->loopbackpartner = cm_node;
32382 loopbackremotenode->tcp_cntxt.rcv_wscale =
32383 NES_CM_DEFAULT_RCV_WND_SCALE;
32384@@ -2583,7 +2583,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
32385 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
32386 else {
32387 rem_ref_cm_node(cm_core, cm_node);
32388- atomic_inc(&cm_accel_dropped_pkts);
32389+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
32390 dev_kfree_skb_any(skb);
32391 }
32392 break;
32393@@ -2890,7 +2890,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32394
32395 if ((cm_id) && (cm_id->event_handler)) {
32396 if (issue_disconn) {
32397- atomic_inc(&cm_disconnects);
32398+ atomic_inc_unchecked(&cm_disconnects);
32399 cm_event.event = IW_CM_EVENT_DISCONNECT;
32400 cm_event.status = disconn_status;
32401 cm_event.local_addr = cm_id->local_addr;
32402@@ -2912,7 +2912,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32403 }
32404
32405 if (issue_close) {
32406- atomic_inc(&cm_closes);
32407+ atomic_inc_unchecked(&cm_closes);
32408 nes_disconnect(nesqp, 1);
32409
32410 cm_id->provider_data = nesqp;
32411@@ -3048,7 +3048,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32412
32413 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
32414 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
32415- atomic_inc(&cm_accepts);
32416+ atomic_inc_unchecked(&cm_accepts);
32417
32418 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
32419 netdev_refcnt_read(nesvnic->netdev));
32420@@ -3250,7 +3250,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
32421 struct nes_cm_core *cm_core;
32422 u8 *start_buff;
32423
32424- atomic_inc(&cm_rejects);
32425+ atomic_inc_unchecked(&cm_rejects);
32426 cm_node = (struct nes_cm_node *)cm_id->provider_data;
32427 loopback = cm_node->loopbackpartner;
32428 cm_core = cm_node->cm_core;
32429@@ -3310,7 +3310,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32430 ntohl(cm_id->local_addr.sin_addr.s_addr),
32431 ntohs(cm_id->local_addr.sin_port));
32432
32433- atomic_inc(&cm_connects);
32434+ atomic_inc_unchecked(&cm_connects);
32435 nesqp->active_conn = 1;
32436
32437 /* cache the cm_id in the qp */
32438@@ -3416,7 +3416,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
32439 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
32440 return err;
32441 }
32442- atomic_inc(&cm_listens_created);
32443+ atomic_inc_unchecked(&cm_listens_created);
32444 }
32445
32446 cm_id->add_ref(cm_id);
32447@@ -3517,7 +3517,7 @@ static void cm_event_connected(struct nes_cm_event *event)
32448
32449 if (nesqp->destroyed)
32450 return;
32451- atomic_inc(&cm_connecteds);
32452+ atomic_inc_unchecked(&cm_connecteds);
32453 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
32454 " local port 0x%04X. jiffies = %lu.\n",
32455 nesqp->hwqp.qp_id,
32456@@ -3704,7 +3704,7 @@ static void cm_event_reset(struct nes_cm_event *event)
32457
32458 cm_id->add_ref(cm_id);
32459 ret = cm_id->event_handler(cm_id, &cm_event);
32460- atomic_inc(&cm_closes);
32461+ atomic_inc_unchecked(&cm_closes);
32462 cm_event.event = IW_CM_EVENT_CLOSE;
32463 cm_event.status = 0;
32464 cm_event.provider_data = cm_id->provider_data;
32465@@ -3740,7 +3740,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
32466 return;
32467 cm_id = cm_node->cm_id;
32468
32469- atomic_inc(&cm_connect_reqs);
32470+ atomic_inc_unchecked(&cm_connect_reqs);
32471 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32472 cm_node, cm_id, jiffies);
32473
32474@@ -3780,7 +3780,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
32475 return;
32476 cm_id = cm_node->cm_id;
32477
32478- atomic_inc(&cm_connect_reqs);
32479+ atomic_inc_unchecked(&cm_connect_reqs);
32480 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32481 cm_node, cm_id, jiffies);
32482
32483diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
32484index 3ba7be3..c81f6ff 100644
32485--- a/drivers/infiniband/hw/nes/nes_mgt.c
32486+++ b/drivers/infiniband/hw/nes/nes_mgt.c
32487@@ -40,8 +40,8 @@
32488 #include "nes.h"
32489 #include "nes_mgt.h"
32490
32491-atomic_t pau_qps_created;
32492-atomic_t pau_qps_destroyed;
32493+atomic_unchecked_t pau_qps_created;
32494+atomic_unchecked_t pau_qps_destroyed;
32495
32496 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
32497 {
32498@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
32499 {
32500 struct sk_buff *skb;
32501 unsigned long flags;
32502- atomic_inc(&pau_qps_destroyed);
32503+ atomic_inc_unchecked(&pau_qps_destroyed);
32504
32505 /* Free packets that have not yet been forwarded */
32506 /* Lock is acquired by skb_dequeue when removing the skb */
32507@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
32508 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
32509 skb_queue_head_init(&nesqp->pau_list);
32510 spin_lock_init(&nesqp->pau_lock);
32511- atomic_inc(&pau_qps_created);
32512+ atomic_inc_unchecked(&pau_qps_created);
32513 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
32514 }
32515
32516diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
32517index f3a3ecf..57d311d 100644
32518--- a/drivers/infiniband/hw/nes/nes_nic.c
32519+++ b/drivers/infiniband/hw/nes/nes_nic.c
32520@@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
32521 target_stat_values[++index] = mh_detected;
32522 target_stat_values[++index] = mh_pauses_sent;
32523 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
32524- target_stat_values[++index] = atomic_read(&cm_connects);
32525- target_stat_values[++index] = atomic_read(&cm_accepts);
32526- target_stat_values[++index] = atomic_read(&cm_disconnects);
32527- target_stat_values[++index] = atomic_read(&cm_connecteds);
32528- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
32529- target_stat_values[++index] = atomic_read(&cm_rejects);
32530- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
32531- target_stat_values[++index] = atomic_read(&qps_created);
32532- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
32533- target_stat_values[++index] = atomic_read(&qps_destroyed);
32534- target_stat_values[++index] = atomic_read(&cm_closes);
32535+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
32536+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
32537+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
32538+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
32539+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
32540+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
32541+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
32542+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
32543+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
32544+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
32545+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
32546 target_stat_values[++index] = cm_packets_sent;
32547 target_stat_values[++index] = cm_packets_bounced;
32548 target_stat_values[++index] = cm_packets_created;
32549 target_stat_values[++index] = cm_packets_received;
32550 target_stat_values[++index] = cm_packets_dropped;
32551 target_stat_values[++index] = cm_packets_retrans;
32552- target_stat_values[++index] = atomic_read(&cm_listens_created);
32553- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
32554+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
32555+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
32556 target_stat_values[++index] = cm_backlog_drops;
32557- target_stat_values[++index] = atomic_read(&cm_loopbacks);
32558- target_stat_values[++index] = atomic_read(&cm_nodes_created);
32559- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
32560- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
32561- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
32562+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
32563+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
32564+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
32565+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
32566+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
32567 target_stat_values[++index] = nesadapter->free_4kpbl;
32568 target_stat_values[++index] = nesadapter->free_256pbl;
32569 target_stat_values[++index] = int_mod_timer_init;
32570 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
32571 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
32572 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
32573- target_stat_values[++index] = atomic_read(&pau_qps_created);
32574- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
32575+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
32576+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
32577 }
32578
32579 /**
32580diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
32581index 8b8812d..a5e1133 100644
32582--- a/drivers/infiniband/hw/nes/nes_verbs.c
32583+++ b/drivers/infiniband/hw/nes/nes_verbs.c
32584@@ -46,9 +46,9 @@
32585
32586 #include <rdma/ib_umem.h>
32587
32588-atomic_t mod_qp_timouts;
32589-atomic_t qps_created;
32590-atomic_t sw_qps_destroyed;
32591+atomic_unchecked_t mod_qp_timouts;
32592+atomic_unchecked_t qps_created;
32593+atomic_unchecked_t sw_qps_destroyed;
32594
32595 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
32596
32597@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
32598 if (init_attr->create_flags)
32599 return ERR_PTR(-EINVAL);
32600
32601- atomic_inc(&qps_created);
32602+ atomic_inc_unchecked(&qps_created);
32603 switch (init_attr->qp_type) {
32604 case IB_QPT_RC:
32605 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
32606@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
32607 struct iw_cm_event cm_event;
32608 int ret = 0;
32609
32610- atomic_inc(&sw_qps_destroyed);
32611+ atomic_inc_unchecked(&sw_qps_destroyed);
32612 nesqp->destroyed = 1;
32613
32614 /* Blow away the connection if it exists. */
32615diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
32616index 6b811e3..f8acf88 100644
32617--- a/drivers/infiniband/hw/qib/qib.h
32618+++ b/drivers/infiniband/hw/qib/qib.h
32619@@ -51,6 +51,7 @@
32620 #include <linux/completion.h>
32621 #include <linux/kref.h>
32622 #include <linux/sched.h>
32623+#include <linux/slab.h>
32624
32625 #include "qib_common.h"
32626 #include "qib_verbs.h"
32627diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
32628index da739d9..da1c7f4 100644
32629--- a/drivers/input/gameport/gameport.c
32630+++ b/drivers/input/gameport/gameport.c
32631@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
32632 */
32633 static void gameport_init_port(struct gameport *gameport)
32634 {
32635- static atomic_t gameport_no = ATOMIC_INIT(0);
32636+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
32637
32638 __module_get(THIS_MODULE);
32639
32640 mutex_init(&gameport->drv_mutex);
32641 device_initialize(&gameport->dev);
32642 dev_set_name(&gameport->dev, "gameport%lu",
32643- (unsigned long)atomic_inc_return(&gameport_no) - 1);
32644+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
32645 gameport->dev.bus = &gameport_bus;
32646 gameport->dev.release = gameport_release_port;
32647 if (gameport->parent)
32648diff --git a/drivers/input/input.c b/drivers/input/input.c
32649index 8921c61..f5cd63d 100644
32650--- a/drivers/input/input.c
32651+++ b/drivers/input/input.c
32652@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
32653 */
32654 int input_register_device(struct input_dev *dev)
32655 {
32656- static atomic_t input_no = ATOMIC_INIT(0);
32657+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
32658 struct input_handler *handler;
32659 const char *path;
32660 int error;
32661@@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
32662 dev->setkeycode = input_default_setkeycode;
32663
32664 dev_set_name(&dev->dev, "input%ld",
32665- (unsigned long) atomic_inc_return(&input_no) - 1);
32666+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
32667
32668 error = device_add(&dev->dev);
32669 if (error)
32670diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
32671index b8d8611..7a4a04b 100644
32672--- a/drivers/input/joystick/sidewinder.c
32673+++ b/drivers/input/joystick/sidewinder.c
32674@@ -30,6 +30,7 @@
32675 #include <linux/kernel.h>
32676 #include <linux/module.h>
32677 #include <linux/slab.h>
32678+#include <linux/sched.h>
32679 #include <linux/init.h>
32680 #include <linux/input.h>
32681 #include <linux/gameport.h>
32682diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
32683index fd7a0d5..a4af10c 100644
32684--- a/drivers/input/joystick/xpad.c
32685+++ b/drivers/input/joystick/xpad.c
32686@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
32687
32688 static int xpad_led_probe(struct usb_xpad *xpad)
32689 {
32690- static atomic_t led_seq = ATOMIC_INIT(0);
32691+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
32692 long led_no;
32693 struct xpad_led *led;
32694 struct led_classdev *led_cdev;
32695@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
32696 if (!led)
32697 return -ENOMEM;
32698
32699- led_no = (long)atomic_inc_return(&led_seq) - 1;
32700+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
32701
32702 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
32703 led->xpad = xpad;
32704diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
32705index 0110b5a..d3ad144 100644
32706--- a/drivers/input/mousedev.c
32707+++ b/drivers/input/mousedev.c
32708@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
32709
32710 spin_unlock_irq(&client->packet_lock);
32711
32712- if (copy_to_user(buffer, data, count))
32713+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
32714 return -EFAULT;
32715
32716 return count;
32717diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
32718index d0f7533..fb8215b 100644
32719--- a/drivers/input/serio/serio.c
32720+++ b/drivers/input/serio/serio.c
32721@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
32722 */
32723 static void serio_init_port(struct serio *serio)
32724 {
32725- static atomic_t serio_no = ATOMIC_INIT(0);
32726+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
32727
32728 __module_get(THIS_MODULE);
32729
32730@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
32731 mutex_init(&serio->drv_mutex);
32732 device_initialize(&serio->dev);
32733 dev_set_name(&serio->dev, "serio%ld",
32734- (long)atomic_inc_return(&serio_no) - 1);
32735+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
32736 serio->dev.bus = &serio_bus;
32737 serio->dev.release = serio_release_port;
32738 serio->dev.groups = serio_device_attr_groups;
32739diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
32740index b902794..fc7b85b 100644
32741--- a/drivers/isdn/capi/capi.c
32742+++ b/drivers/isdn/capi/capi.c
32743@@ -83,8 +83,8 @@ struct capiminor {
32744
32745 struct capi20_appl *ap;
32746 u32 ncci;
32747- atomic_t datahandle;
32748- atomic_t msgid;
32749+ atomic_unchecked_t datahandle;
32750+ atomic_unchecked_t msgid;
32751
32752 struct tty_port port;
32753 int ttyinstop;
32754@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
32755 capimsg_setu16(s, 2, mp->ap->applid);
32756 capimsg_setu8 (s, 4, CAPI_DATA_B3);
32757 capimsg_setu8 (s, 5, CAPI_RESP);
32758- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
32759+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
32760 capimsg_setu32(s, 8, mp->ncci);
32761 capimsg_setu16(s, 12, datahandle);
32762 }
32763@@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
32764 mp->outbytes -= len;
32765 spin_unlock_bh(&mp->outlock);
32766
32767- datahandle = atomic_inc_return(&mp->datahandle);
32768+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
32769 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
32770 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32771 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32772 capimsg_setu16(skb->data, 2, mp->ap->applid);
32773 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
32774 capimsg_setu8 (skb->data, 5, CAPI_REQ);
32775- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
32776+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
32777 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
32778 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
32779 capimsg_setu16(skb->data, 16, len); /* Data length */
32780diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
32781index 821f7ac..28d4030 100644
32782--- a/drivers/isdn/hardware/avm/b1.c
32783+++ b/drivers/isdn/hardware/avm/b1.c
32784@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
32785 }
32786 if (left) {
32787 if (t4file->user) {
32788- if (copy_from_user(buf, dp, left))
32789+ if (left > sizeof buf || copy_from_user(buf, dp, left))
32790 return -EFAULT;
32791 } else {
32792 memcpy(buf, dp, left);
32793@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
32794 }
32795 if (left) {
32796 if (config->user) {
32797- if (copy_from_user(buf, dp, left))
32798+ if (left > sizeof buf || copy_from_user(buf, dp, left))
32799 return -EFAULT;
32800 } else {
32801 memcpy(buf, dp, left);
32802diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
32803index dd6b53a..19d9ee6 100644
32804--- a/drivers/isdn/hardware/eicon/divasync.h
32805+++ b/drivers/isdn/hardware/eicon/divasync.h
32806@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
32807 } diva_didd_add_adapter_t;
32808 typedef struct _diva_didd_remove_adapter {
32809 IDI_CALL p_request;
32810-} diva_didd_remove_adapter_t;
32811+} __no_const diva_didd_remove_adapter_t;
32812 typedef struct _diva_didd_read_adapter_array {
32813 void *buffer;
32814 dword length;
32815diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
32816index d303e65..28bcb7b 100644
32817--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
32818+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
32819@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
32820 typedef struct _diva_os_idi_adapter_interface {
32821 diva_init_card_proc_t cleanup_adapter_proc;
32822 diva_cmd_card_proc_t cmd_proc;
32823-} diva_os_idi_adapter_interface_t;
32824+} __no_const diva_os_idi_adapter_interface_t;
32825
32826 typedef struct _diva_os_xdi_adapter {
32827 struct list_head link;
32828diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
32829index e74df7c..03a03ba 100644
32830--- a/drivers/isdn/icn/icn.c
32831+++ b/drivers/isdn/icn/icn.c
32832@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
32833 if (count > len)
32834 count = len;
32835 if (user) {
32836- if (copy_from_user(msg, buf, count))
32837+ if (count > sizeof msg || copy_from_user(msg, buf, count))
32838 return -EFAULT;
32839 } else
32840 memcpy(msg, buf, count);
32841diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
32842index 8bc4915..4cc6a2e 100644
32843--- a/drivers/leds/leds-mc13783.c
32844+++ b/drivers/leds/leds-mc13783.c
32845@@ -280,7 +280,7 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
32846 return -EINVAL;
32847 }
32848
32849- led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
32850+ led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
32851 if (led == NULL) {
32852 dev_err(&pdev->dev, "failed to alloc memory\n");
32853 return -ENOMEM;
32854diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
32855index b5fdcb7..5b6c59f 100644
32856--- a/drivers/lguest/core.c
32857+++ b/drivers/lguest/core.c
32858@@ -92,9 +92,17 @@ static __init int map_switcher(void)
32859 * it's worked so far. The end address needs +1 because __get_vm_area
32860 * allocates an extra guard page, so we need space for that.
32861 */
32862+
32863+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32864+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32865+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
32866+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32867+#else
32868 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32869 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
32870 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32871+#endif
32872+
32873 if (!switcher_vma) {
32874 err = -ENOMEM;
32875 printk("lguest: could not map switcher pages high\n");
32876@@ -119,7 +127,7 @@ static __init int map_switcher(void)
32877 * Now the Switcher is mapped at the right address, we can't fail!
32878 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
32879 */
32880- memcpy(switcher_vma->addr, start_switcher_text,
32881+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
32882 end_switcher_text - start_switcher_text);
32883
32884 printk(KERN_INFO "lguest: mapped switcher at %p\n",
32885diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
32886index 3980903..ce25c5e 100644
32887--- a/drivers/lguest/x86/core.c
32888+++ b/drivers/lguest/x86/core.c
32889@@ -59,7 +59,7 @@ static struct {
32890 /* Offset from where switcher.S was compiled to where we've copied it */
32891 static unsigned long switcher_offset(void)
32892 {
32893- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
32894+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
32895 }
32896
32897 /* This cpu's struct lguest_pages. */
32898@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
32899 * These copies are pretty cheap, so we do them unconditionally: */
32900 /* Save the current Host top-level page directory.
32901 */
32902+
32903+#ifdef CONFIG_PAX_PER_CPU_PGD
32904+ pages->state.host_cr3 = read_cr3();
32905+#else
32906 pages->state.host_cr3 = __pa(current->mm->pgd);
32907+#endif
32908+
32909 /*
32910 * Set up the Guest's page tables to see this CPU's pages (and no
32911 * other CPU's pages).
32912@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
32913 * compiled-in switcher code and the high-mapped copy we just made.
32914 */
32915 for (i = 0; i < IDT_ENTRIES; i++)
32916- default_idt_entries[i] += switcher_offset();
32917+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
32918
32919 /*
32920 * Set up the Switcher's per-cpu areas.
32921@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
32922 * it will be undisturbed when we switch. To change %cs and jump we
32923 * need this structure to feed to Intel's "lcall" instruction.
32924 */
32925- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
32926+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
32927 lguest_entry.segment = LGUEST_CS;
32928
32929 /*
32930diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
32931index 40634b0..4f5855e 100644
32932--- a/drivers/lguest/x86/switcher_32.S
32933+++ b/drivers/lguest/x86/switcher_32.S
32934@@ -87,6 +87,7 @@
32935 #include <asm/page.h>
32936 #include <asm/segment.h>
32937 #include <asm/lguest.h>
32938+#include <asm/processor-flags.h>
32939
32940 // We mark the start of the code to copy
32941 // It's placed in .text tho it's never run here
32942@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
32943 // Changes type when we load it: damn Intel!
32944 // For after we switch over our page tables
32945 // That entry will be read-only: we'd crash.
32946+
32947+#ifdef CONFIG_PAX_KERNEXEC
32948+ mov %cr0, %edx
32949+ xor $X86_CR0_WP, %edx
32950+ mov %edx, %cr0
32951+#endif
32952+
32953 movl $(GDT_ENTRY_TSS*8), %edx
32954 ltr %dx
32955
32956@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
32957 // Let's clear it again for our return.
32958 // The GDT descriptor of the Host
32959 // Points to the table after two "size" bytes
32960- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
32961+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
32962 // Clear "used" from type field (byte 5, bit 2)
32963- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
32964+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
32965+
32966+#ifdef CONFIG_PAX_KERNEXEC
32967+ mov %cr0, %eax
32968+ xor $X86_CR0_WP, %eax
32969+ mov %eax, %cr0
32970+#endif
32971
32972 // Once our page table's switched, the Guest is live!
32973 // The Host fades as we run this final step.
32974@@ -295,13 +309,12 @@ deliver_to_host:
32975 // I consulted gcc, and it gave
32976 // These instructions, which I gladly credit:
32977 leal (%edx,%ebx,8), %eax
32978- movzwl (%eax),%edx
32979- movl 4(%eax), %eax
32980- xorw %ax, %ax
32981- orl %eax, %edx
32982+ movl 4(%eax), %edx
32983+ movw (%eax), %dx
32984 // Now the address of the handler's in %edx
32985 // We call it now: its "iret" drops us home.
32986- jmp *%edx
32987+ ljmp $__KERNEL_CS, $1f
32988+1: jmp *%edx
32989
32990 // Every interrupt can come to us here
32991 // But we must truly tell each apart.
32992diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
32993index 20e5c2c..9e849a9 100644
32994--- a/drivers/macintosh/macio_asic.c
32995+++ b/drivers/macintosh/macio_asic.c
32996@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
32997 * MacIO is matched against any Apple ID, it's probe() function
32998 * will then decide wether it applies or not
32999 */
33000-static const struct pci_device_id __devinitdata pci_ids [] = { {
33001+static const struct pci_device_id __devinitconst pci_ids [] = { {
33002 .vendor = PCI_VENDOR_ID_APPLE,
33003 .device = PCI_ANY_ID,
33004 .subvendor = PCI_ANY_ID,
33005diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
33006index 17e2b47..bcbeec4 100644
33007--- a/drivers/md/bitmap.c
33008+++ b/drivers/md/bitmap.c
33009@@ -1823,7 +1823,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
33010 chunk_kb ? "KB" : "B");
33011 if (bitmap->file) {
33012 seq_printf(seq, ", file: ");
33013- seq_path(seq, &bitmap->file->f_path, " \t\n");
33014+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
33015 }
33016
33017 seq_printf(seq, "\n");
33018diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
33019index a1a3e6d..1918bfc 100644
33020--- a/drivers/md/dm-ioctl.c
33021+++ b/drivers/md/dm-ioctl.c
33022@@ -1590,7 +1590,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
33023 cmd == DM_LIST_VERSIONS_CMD)
33024 return 0;
33025
33026- if ((cmd == DM_DEV_CREATE_CMD)) {
33027+ if (cmd == DM_DEV_CREATE_CMD) {
33028 if (!*param->name) {
33029 DMWARN("name not supplied when creating device");
33030 return -EINVAL;
33031diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
33032index d039de8..0cf5b87 100644
33033--- a/drivers/md/dm-raid1.c
33034+++ b/drivers/md/dm-raid1.c
33035@@ -40,7 +40,7 @@ enum dm_raid1_error {
33036
33037 struct mirror {
33038 struct mirror_set *ms;
33039- atomic_t error_count;
33040+ atomic_unchecked_t error_count;
33041 unsigned long error_type;
33042 struct dm_dev *dev;
33043 sector_t offset;
33044@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
33045 struct mirror *m;
33046
33047 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
33048- if (!atomic_read(&m->error_count))
33049+ if (!atomic_read_unchecked(&m->error_count))
33050 return m;
33051
33052 return NULL;
33053@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
33054 * simple way to tell if a device has encountered
33055 * errors.
33056 */
33057- atomic_inc(&m->error_count);
33058+ atomic_inc_unchecked(&m->error_count);
33059
33060 if (test_and_set_bit(error_type, &m->error_type))
33061 return;
33062@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
33063 struct mirror *m = get_default_mirror(ms);
33064
33065 do {
33066- if (likely(!atomic_read(&m->error_count)))
33067+ if (likely(!atomic_read_unchecked(&m->error_count)))
33068 return m;
33069
33070 if (m-- == ms->mirror)
33071@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
33072 {
33073 struct mirror *default_mirror = get_default_mirror(m->ms);
33074
33075- return !atomic_read(&default_mirror->error_count);
33076+ return !atomic_read_unchecked(&default_mirror->error_count);
33077 }
33078
33079 static int mirror_available(struct mirror_set *ms, struct bio *bio)
33080@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
33081 */
33082 if (likely(region_in_sync(ms, region, 1)))
33083 m = choose_mirror(ms, bio->bi_sector);
33084- else if (m && atomic_read(&m->error_count))
33085+ else if (m && atomic_read_unchecked(&m->error_count))
33086 m = NULL;
33087
33088 if (likely(m))
33089@@ -938,7 +938,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
33090 }
33091
33092 ms->mirror[mirror].ms = ms;
33093- atomic_set(&(ms->mirror[mirror].error_count), 0);
33094+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
33095 ms->mirror[mirror].error_type = 0;
33096 ms->mirror[mirror].offset = offset;
33097
33098@@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
33099 */
33100 static char device_status_char(struct mirror *m)
33101 {
33102- if (!atomic_read(&(m->error_count)))
33103+ if (!atomic_read_unchecked(&(m->error_count)))
33104 return 'A';
33105
33106 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
33107diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
33108index 35c94ff..20d4c17 100644
33109--- a/drivers/md/dm-stripe.c
33110+++ b/drivers/md/dm-stripe.c
33111@@ -20,7 +20,7 @@ struct stripe {
33112 struct dm_dev *dev;
33113 sector_t physical_start;
33114
33115- atomic_t error_count;
33116+ atomic_unchecked_t error_count;
33117 };
33118
33119 struct stripe_c {
33120@@ -193,7 +193,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
33121 kfree(sc);
33122 return r;
33123 }
33124- atomic_set(&(sc->stripe[i].error_count), 0);
33125+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
33126 }
33127
33128 ti->private = sc;
33129@@ -315,7 +315,7 @@ static int stripe_status(struct dm_target *ti,
33130 DMEMIT("%d ", sc->stripes);
33131 for (i = 0; i < sc->stripes; i++) {
33132 DMEMIT("%s ", sc->stripe[i].dev->name);
33133- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
33134+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
33135 'D' : 'A';
33136 }
33137 buffer[i] = '\0';
33138@@ -362,8 +362,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
33139 */
33140 for (i = 0; i < sc->stripes; i++)
33141 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
33142- atomic_inc(&(sc->stripe[i].error_count));
33143- if (atomic_read(&(sc->stripe[i].error_count)) <
33144+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
33145+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
33146 DM_IO_ERROR_THRESHOLD)
33147 schedule_work(&sc->trigger_event);
33148 }
33149diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
33150index 2e227fb..44ead1f 100644
33151--- a/drivers/md/dm-table.c
33152+++ b/drivers/md/dm-table.c
33153@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
33154 if (!dev_size)
33155 return 0;
33156
33157- if ((start >= dev_size) || (start + len > dev_size)) {
33158+ if ((start >= dev_size) || (len > dev_size - start)) {
33159 DMWARN("%s: %s too small for target: "
33160 "start=%llu, len=%llu, dev_size=%llu",
33161 dm_device_name(ti->table->md), bdevname(bdev, b),
33162diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
33163index 737d388..811ad5a 100644
33164--- a/drivers/md/dm-thin-metadata.c
33165+++ b/drivers/md/dm-thin-metadata.c
33166@@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33167
33168 pmd->info.tm = tm;
33169 pmd->info.levels = 2;
33170- pmd->info.value_type.context = pmd->data_sm;
33171+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33172 pmd->info.value_type.size = sizeof(__le64);
33173 pmd->info.value_type.inc = data_block_inc;
33174 pmd->info.value_type.dec = data_block_dec;
33175@@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33176
33177 pmd->bl_info.tm = tm;
33178 pmd->bl_info.levels = 1;
33179- pmd->bl_info.value_type.context = pmd->data_sm;
33180+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33181 pmd->bl_info.value_type.size = sizeof(__le64);
33182 pmd->bl_info.value_type.inc = data_block_inc;
33183 pmd->bl_info.value_type.dec = data_block_dec;
33184diff --git a/drivers/md/dm.c b/drivers/md/dm.c
33185index e24143c..ce2f21a1 100644
33186--- a/drivers/md/dm.c
33187+++ b/drivers/md/dm.c
33188@@ -176,9 +176,9 @@ struct mapped_device {
33189 /*
33190 * Event handling.
33191 */
33192- atomic_t event_nr;
33193+ atomic_unchecked_t event_nr;
33194 wait_queue_head_t eventq;
33195- atomic_t uevent_seq;
33196+ atomic_unchecked_t uevent_seq;
33197 struct list_head uevent_list;
33198 spinlock_t uevent_lock; /* Protect access to uevent_list */
33199
33200@@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
33201 rwlock_init(&md->map_lock);
33202 atomic_set(&md->holders, 1);
33203 atomic_set(&md->open_count, 0);
33204- atomic_set(&md->event_nr, 0);
33205- atomic_set(&md->uevent_seq, 0);
33206+ atomic_set_unchecked(&md->event_nr, 0);
33207+ atomic_set_unchecked(&md->uevent_seq, 0);
33208 INIT_LIST_HEAD(&md->uevent_list);
33209 spin_lock_init(&md->uevent_lock);
33210
33211@@ -1980,7 +1980,7 @@ static void event_callback(void *context)
33212
33213 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
33214
33215- atomic_inc(&md->event_nr);
33216+ atomic_inc_unchecked(&md->event_nr);
33217 wake_up(&md->eventq);
33218 }
33219
33220@@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
33221
33222 uint32_t dm_next_uevent_seq(struct mapped_device *md)
33223 {
33224- return atomic_add_return(1, &md->uevent_seq);
33225+ return atomic_add_return_unchecked(1, &md->uevent_seq);
33226 }
33227
33228 uint32_t dm_get_event_nr(struct mapped_device *md)
33229 {
33230- return atomic_read(&md->event_nr);
33231+ return atomic_read_unchecked(&md->event_nr);
33232 }
33233
33234 int dm_wait_event(struct mapped_device *md, int event_nr)
33235 {
33236 return wait_event_interruptible(md->eventq,
33237- (event_nr != atomic_read(&md->event_nr)));
33238+ (event_nr != atomic_read_unchecked(&md->event_nr)));
33239 }
33240
33241 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
33242diff --git a/drivers/md/md.c b/drivers/md/md.c
33243index 2b30ffd..bf789ce 100644
33244--- a/drivers/md/md.c
33245+++ b/drivers/md/md.c
33246@@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
33247 * start build, activate spare
33248 */
33249 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
33250-static atomic_t md_event_count;
33251+static atomic_unchecked_t md_event_count;
33252 void md_new_event(struct mddev *mddev)
33253 {
33254- atomic_inc(&md_event_count);
33255+ atomic_inc_unchecked(&md_event_count);
33256 wake_up(&md_event_waiters);
33257 }
33258 EXPORT_SYMBOL_GPL(md_new_event);
33259@@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
33260 */
33261 static void md_new_event_inintr(struct mddev *mddev)
33262 {
33263- atomic_inc(&md_event_count);
33264+ atomic_inc_unchecked(&md_event_count);
33265 wake_up(&md_event_waiters);
33266 }
33267
33268@@ -1526,7 +1526,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
33269
33270 rdev->preferred_minor = 0xffff;
33271 rdev->data_offset = le64_to_cpu(sb->data_offset);
33272- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33273+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33274
33275 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
33276 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
33277@@ -1745,7 +1745,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
33278 else
33279 sb->resync_offset = cpu_to_le64(0);
33280
33281- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
33282+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
33283
33284 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
33285 sb->size = cpu_to_le64(mddev->dev_sectors);
33286@@ -2691,7 +2691,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
33287 static ssize_t
33288 errors_show(struct md_rdev *rdev, char *page)
33289 {
33290- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
33291+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
33292 }
33293
33294 static ssize_t
33295@@ -2700,7 +2700,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
33296 char *e;
33297 unsigned long n = simple_strtoul(buf, &e, 10);
33298 if (*buf && (*e == 0 || *e == '\n')) {
33299- atomic_set(&rdev->corrected_errors, n);
33300+ atomic_set_unchecked(&rdev->corrected_errors, n);
33301 return len;
33302 }
33303 return -EINVAL;
33304@@ -3086,8 +3086,8 @@ int md_rdev_init(struct md_rdev *rdev)
33305 rdev->sb_loaded = 0;
33306 rdev->bb_page = NULL;
33307 atomic_set(&rdev->nr_pending, 0);
33308- atomic_set(&rdev->read_errors, 0);
33309- atomic_set(&rdev->corrected_errors, 0);
33310+ atomic_set_unchecked(&rdev->read_errors, 0);
33311+ atomic_set_unchecked(&rdev->corrected_errors, 0);
33312
33313 INIT_LIST_HEAD(&rdev->same_set);
33314 init_waitqueue_head(&rdev->blocked_wait);
33315@@ -6738,7 +6738,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
33316
33317 spin_unlock(&pers_lock);
33318 seq_printf(seq, "\n");
33319- seq->poll_event = atomic_read(&md_event_count);
33320+ seq->poll_event = atomic_read_unchecked(&md_event_count);
33321 return 0;
33322 }
33323 if (v == (void*)2) {
33324@@ -6841,7 +6841,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
33325 return error;
33326
33327 seq = file->private_data;
33328- seq->poll_event = atomic_read(&md_event_count);
33329+ seq->poll_event = atomic_read_unchecked(&md_event_count);
33330 return error;
33331 }
33332
33333@@ -6855,7 +6855,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
33334 /* always allow read */
33335 mask = POLLIN | POLLRDNORM;
33336
33337- if (seq->poll_event != atomic_read(&md_event_count))
33338+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
33339 mask |= POLLERR | POLLPRI;
33340 return mask;
33341 }
33342@@ -6899,7 +6899,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
33343 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
33344 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
33345 (int)part_stat_read(&disk->part0, sectors[1]) -
33346- atomic_read(&disk->sync_io);
33347+ atomic_read_unchecked(&disk->sync_io);
33348 /* sync IO will cause sync_io to increase before the disk_stats
33349 * as sync_io is counted when a request starts, and
33350 * disk_stats is counted when it completes.
33351diff --git a/drivers/md/md.h b/drivers/md/md.h
33352index 1c2063c..9639970 100644
33353--- a/drivers/md/md.h
33354+++ b/drivers/md/md.h
33355@@ -93,13 +93,13 @@ struct md_rdev {
33356 * only maintained for arrays that
33357 * support hot removal
33358 */
33359- atomic_t read_errors; /* number of consecutive read errors that
33360+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
33361 * we have tried to ignore.
33362 */
33363 struct timespec last_read_error; /* monotonic time since our
33364 * last read error
33365 */
33366- atomic_t corrected_errors; /* number of corrected read errors,
33367+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
33368 * for reporting to userspace and storing
33369 * in superblock.
33370 */
33371@@ -429,7 +429,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
33372
33373 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
33374 {
33375- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33376+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33377 }
33378
33379 struct md_personality
33380diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
33381index 50ed53b..4f29d7d 100644
33382--- a/drivers/md/persistent-data/dm-space-map-checker.c
33383+++ b/drivers/md/persistent-data/dm-space-map-checker.c
33384@@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
33385 /*----------------------------------------------------------------*/
33386
33387 struct sm_checker {
33388- struct dm_space_map sm;
33389+ dm_space_map_no_const sm;
33390
33391 struct count_array old_counts;
33392 struct count_array counts;
33393diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
33394index fc469ba..2d91555 100644
33395--- a/drivers/md/persistent-data/dm-space-map-disk.c
33396+++ b/drivers/md/persistent-data/dm-space-map-disk.c
33397@@ -23,7 +23,7 @@
33398 * Space map interface.
33399 */
33400 struct sm_disk {
33401- struct dm_space_map sm;
33402+ dm_space_map_no_const sm;
33403
33404 struct ll_disk ll;
33405 struct ll_disk old_ll;
33406diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
33407index e89ae5e..062e4c2 100644
33408--- a/drivers/md/persistent-data/dm-space-map-metadata.c
33409+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
33410@@ -43,7 +43,7 @@ struct block_op {
33411 };
33412
33413 struct sm_metadata {
33414- struct dm_space_map sm;
33415+ dm_space_map_no_const sm;
33416
33417 struct ll_disk ll;
33418 struct ll_disk old_ll;
33419diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
33420index 1cbfc6b..56e1dbb 100644
33421--- a/drivers/md/persistent-data/dm-space-map.h
33422+++ b/drivers/md/persistent-data/dm-space-map.h
33423@@ -60,6 +60,7 @@ struct dm_space_map {
33424 int (*root_size)(struct dm_space_map *sm, size_t *result);
33425 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
33426 };
33427+typedef struct dm_space_map __no_const dm_space_map_no_const;
33428
33429 /*----------------------------------------------------------------*/
33430
33431diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
33432index d7e9577..faa512f2 100644
33433--- a/drivers/md/raid1.c
33434+++ b/drivers/md/raid1.c
33435@@ -1688,7 +1688,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
33436 if (r1_sync_page_io(rdev, sect, s,
33437 bio->bi_io_vec[idx].bv_page,
33438 READ) != 0)
33439- atomic_add(s, &rdev->corrected_errors);
33440+ atomic_add_unchecked(s, &rdev->corrected_errors);
33441 }
33442 sectors -= s;
33443 sect += s;
33444@@ -1902,7 +1902,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
33445 test_bit(In_sync, &rdev->flags)) {
33446 if (r1_sync_page_io(rdev, sect, s,
33447 conf->tmppage, READ)) {
33448- atomic_add(s, &rdev->corrected_errors);
33449+ atomic_add_unchecked(s, &rdev->corrected_errors);
33450 printk(KERN_INFO
33451 "md/raid1:%s: read error corrected "
33452 "(%d sectors at %llu on %s)\n",
33453diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
33454index d037adb..ed17dc9 100644
33455--- a/drivers/md/raid10.c
33456+++ b/drivers/md/raid10.c
33457@@ -1684,7 +1684,7 @@ static void end_sync_read(struct bio *bio, int error)
33458 /* The write handler will notice the lack of
33459 * R10BIO_Uptodate and record any errors etc
33460 */
33461- atomic_add(r10_bio->sectors,
33462+ atomic_add_unchecked(r10_bio->sectors,
33463 &conf->mirrors[d].rdev->corrected_errors);
33464
33465 /* for reconstruct, we always reschedule after a read.
33466@@ -2033,7 +2033,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33467 {
33468 struct timespec cur_time_mon;
33469 unsigned long hours_since_last;
33470- unsigned int read_errors = atomic_read(&rdev->read_errors);
33471+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
33472
33473 ktime_get_ts(&cur_time_mon);
33474
33475@@ -2055,9 +2055,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33476 * overflowing the shift of read_errors by hours_since_last.
33477 */
33478 if (hours_since_last >= 8 * sizeof(read_errors))
33479- atomic_set(&rdev->read_errors, 0);
33480+ atomic_set_unchecked(&rdev->read_errors, 0);
33481 else
33482- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
33483+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
33484 }
33485
33486 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
33487@@ -2111,8 +2111,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33488 return;
33489
33490 check_decay_read_errors(mddev, rdev);
33491- atomic_inc(&rdev->read_errors);
33492- if (atomic_read(&rdev->read_errors) > max_read_errors) {
33493+ atomic_inc_unchecked(&rdev->read_errors);
33494+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
33495 char b[BDEVNAME_SIZE];
33496 bdevname(rdev->bdev, b);
33497
33498@@ -2120,7 +2120,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33499 "md/raid10:%s: %s: Raid device exceeded "
33500 "read_error threshold [cur %d:max %d]\n",
33501 mdname(mddev), b,
33502- atomic_read(&rdev->read_errors), max_read_errors);
33503+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
33504 printk(KERN_NOTICE
33505 "md/raid10:%s: %s: Failing raid device\n",
33506 mdname(mddev), b);
33507@@ -2271,7 +2271,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33508 (unsigned long long)(
33509 sect + rdev->data_offset),
33510 bdevname(rdev->bdev, b));
33511- atomic_add(s, &rdev->corrected_errors);
33512+ atomic_add_unchecked(s, &rdev->corrected_errors);
33513 }
33514
33515 rdev_dec_pending(rdev, mddev);
33516diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
33517index f351422..85c01bb 100644
33518--- a/drivers/md/raid5.c
33519+++ b/drivers/md/raid5.c
33520@@ -1686,18 +1686,18 @@ static void raid5_end_read_request(struct bio * bi, int error)
33521 (unsigned long long)(sh->sector
33522 + rdev->data_offset),
33523 bdevname(rdev->bdev, b));
33524- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
33525+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
33526 clear_bit(R5_ReadError, &sh->dev[i].flags);
33527 clear_bit(R5_ReWrite, &sh->dev[i].flags);
33528 }
33529- if (atomic_read(&rdev->read_errors))
33530- atomic_set(&rdev->read_errors, 0);
33531+ if (atomic_read_unchecked(&rdev->read_errors))
33532+ atomic_set_unchecked(&rdev->read_errors, 0);
33533 } else {
33534 const char *bdn = bdevname(rdev->bdev, b);
33535 int retry = 0;
33536
33537 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
33538- atomic_inc(&rdev->read_errors);
33539+ atomic_inc_unchecked(&rdev->read_errors);
33540 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
33541 printk_ratelimited(
33542 KERN_WARNING
33543@@ -1726,7 +1726,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
33544 (unsigned long long)(sh->sector
33545 + rdev->data_offset),
33546 bdn);
33547- else if (atomic_read(&rdev->read_errors)
33548+ else if (atomic_read_unchecked(&rdev->read_errors)
33549 > conf->max_nr_stripes)
33550 printk(KERN_WARNING
33551 "md/raid:%s: Too many read errors, failing device %s.\n",
33552diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
33553index d88c4aa..17c80b1 100644
33554--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
33555+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
33556@@ -1679,7 +1679,7 @@ static struct ddb_info ddb_v6 = {
33557 .subvendor = _subvend, .subdevice = _subdev, \
33558 .driver_data = (unsigned long)&_driverdata }
33559
33560-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
33561+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
33562 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
33563 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
33564 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
33565diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
33566index a7d876f..8c21b61 100644
33567--- a/drivers/media/dvb/dvb-core/dvb_demux.h
33568+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
33569@@ -73,7 +73,7 @@ struct dvb_demux_feed {
33570 union {
33571 dmx_ts_cb ts;
33572 dmx_section_cb sec;
33573- } cb;
33574+ } __no_const cb;
33575
33576 struct dvb_demux *demux;
33577 void *priv;
33578diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
33579index 00a6732..70a682e 100644
33580--- a/drivers/media/dvb/dvb-core/dvbdev.c
33581+++ b/drivers/media/dvb/dvb-core/dvbdev.c
33582@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
33583 const struct dvb_device *template, void *priv, int type)
33584 {
33585 struct dvb_device *dvbdev;
33586- struct file_operations *dvbdevfops;
33587+ file_operations_no_const *dvbdevfops;
33588 struct device *clsdev;
33589 int minor;
33590 int id;
33591diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
33592index 3940bb0..fb3952a 100644
33593--- a/drivers/media/dvb/dvb-usb/cxusb.c
33594+++ b/drivers/media/dvb/dvb-usb/cxusb.c
33595@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
33596
33597 struct dib0700_adapter_state {
33598 int (*set_param_save) (struct dvb_frontend *);
33599-};
33600+} __no_const;
33601
33602 static int dib7070_set_param_override(struct dvb_frontend *fe)
33603 {
33604diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
33605index 451c5a7..649f711 100644
33606--- a/drivers/media/dvb/dvb-usb/dw2102.c
33607+++ b/drivers/media/dvb/dvb-usb/dw2102.c
33608@@ -95,7 +95,7 @@ struct su3000_state {
33609
33610 struct s6x0_state {
33611 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
33612-};
33613+} __no_const;
33614
33615 /* debug */
33616 static int dvb_usb_dw2102_debug;
33617diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
33618index 404f63a..4796533 100644
33619--- a/drivers/media/dvb/frontends/dib3000.h
33620+++ b/drivers/media/dvb/frontends/dib3000.h
33621@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
33622 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
33623 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
33624 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
33625-};
33626+} __no_const;
33627
33628 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
33629 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
33630diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
33631index 7539a5d..06531a6 100644
33632--- a/drivers/media/dvb/ngene/ngene-cards.c
33633+++ b/drivers/media/dvb/ngene/ngene-cards.c
33634@@ -478,7 +478,7 @@ static struct ngene_info ngene_info_m780 = {
33635
33636 /****************************************************************************/
33637
33638-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
33639+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
33640 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
33641 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
33642 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
33643diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
33644index 16a089f..1661b11 100644
33645--- a/drivers/media/radio/radio-cadet.c
33646+++ b/drivers/media/radio/radio-cadet.c
33647@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33648 unsigned char readbuf[RDS_BUFFER];
33649 int i = 0;
33650
33651+ if (count > RDS_BUFFER)
33652+ return -EFAULT;
33653 mutex_lock(&dev->lock);
33654 if (dev->rdsstat == 0) {
33655 dev->rdsstat = 1;
33656@@ -347,7 +349,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33657 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
33658 mutex_unlock(&dev->lock);
33659
33660- if (copy_to_user(data, readbuf, i))
33661+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
33662 return -EFAULT;
33663 return i;
33664 }
33665diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
33666index 9cde353..8c6a1c3 100644
33667--- a/drivers/media/video/au0828/au0828.h
33668+++ b/drivers/media/video/au0828/au0828.h
33669@@ -191,7 +191,7 @@ struct au0828_dev {
33670
33671 /* I2C */
33672 struct i2c_adapter i2c_adap;
33673- struct i2c_algorithm i2c_algo;
33674+ i2c_algorithm_no_const i2c_algo;
33675 struct i2c_client i2c_client;
33676 u32 i2c_rc;
33677
33678diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
33679index 04bf662..e0ac026 100644
33680--- a/drivers/media/video/cx88/cx88-alsa.c
33681+++ b/drivers/media/video/cx88/cx88-alsa.c
33682@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
33683 * Only boards with eeprom and byte 1 at eeprom=1 have it
33684 */
33685
33686-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
33687+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
33688 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33689 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33690 {0, }
33691diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
33692index 88cf9d9..bbc4b2c 100644
33693--- a/drivers/media/video/omap/omap_vout.c
33694+++ b/drivers/media/video/omap/omap_vout.c
33695@@ -64,7 +64,6 @@ enum omap_vout_channels {
33696 OMAP_VIDEO2,
33697 };
33698
33699-static struct videobuf_queue_ops video_vbq_ops;
33700 /* Variables configurable through module params*/
33701 static u32 video1_numbuffers = 3;
33702 static u32 video2_numbuffers = 3;
33703@@ -1000,6 +999,12 @@ static int omap_vout_open(struct file *file)
33704 {
33705 struct videobuf_queue *q;
33706 struct omap_vout_device *vout = NULL;
33707+ static struct videobuf_queue_ops video_vbq_ops = {
33708+ .buf_setup = omap_vout_buffer_setup,
33709+ .buf_prepare = omap_vout_buffer_prepare,
33710+ .buf_release = omap_vout_buffer_release,
33711+ .buf_queue = omap_vout_buffer_queue,
33712+ };
33713
33714 vout = video_drvdata(file);
33715 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
33716@@ -1017,10 +1022,6 @@ static int omap_vout_open(struct file *file)
33717 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
33718
33719 q = &vout->vbq;
33720- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
33721- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
33722- video_vbq_ops.buf_release = omap_vout_buffer_release;
33723- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
33724 spin_lock_init(&vout->vbq_lock);
33725
33726 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
33727diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33728index 305e6aa..0143317 100644
33729--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33730+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33731@@ -196,7 +196,7 @@ struct pvr2_hdw {
33732
33733 /* I2C stuff */
33734 struct i2c_adapter i2c_adap;
33735- struct i2c_algorithm i2c_algo;
33736+ i2c_algorithm_no_const i2c_algo;
33737 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
33738 int i2c_cx25840_hack_state;
33739 int i2c_linked;
33740diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
33741index 02194c0..091733b 100644
33742--- a/drivers/media/video/timblogiw.c
33743+++ b/drivers/media/video/timblogiw.c
33744@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
33745
33746 /* Platform device functions */
33747
33748-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33749+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
33750 .vidioc_querycap = timblogiw_querycap,
33751 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
33752 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
33753@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33754 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
33755 };
33756
33757-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
33758+static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
33759 .owner = THIS_MODULE,
33760 .open = timblogiw_open,
33761 .release = timblogiw_close,
33762diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
33763index a5c591f..db692a3 100644
33764--- a/drivers/message/fusion/mptbase.c
33765+++ b/drivers/message/fusion/mptbase.c
33766@@ -6754,8 +6754,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
33767 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
33768 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
33769
33770+#ifdef CONFIG_GRKERNSEC_HIDESYM
33771+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
33772+#else
33773 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
33774 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
33775+#endif
33776+
33777 /*
33778 * Rounding UP to nearest 4-kB boundary here...
33779 */
33780diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
33781index 551262e..7551198 100644
33782--- a/drivers/message/fusion/mptsas.c
33783+++ b/drivers/message/fusion/mptsas.c
33784@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
33785 return 0;
33786 }
33787
33788+static inline void
33789+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33790+{
33791+ if (phy_info->port_details) {
33792+ phy_info->port_details->rphy = rphy;
33793+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33794+ ioc->name, rphy));
33795+ }
33796+
33797+ if (rphy) {
33798+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33799+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33800+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33801+ ioc->name, rphy, rphy->dev.release));
33802+ }
33803+}
33804+
33805 /* no mutex */
33806 static void
33807 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
33808@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
33809 return NULL;
33810 }
33811
33812-static inline void
33813-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33814-{
33815- if (phy_info->port_details) {
33816- phy_info->port_details->rphy = rphy;
33817- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33818- ioc->name, rphy));
33819- }
33820-
33821- if (rphy) {
33822- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33823- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33824- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33825- ioc->name, rphy, rphy->dev.release));
33826- }
33827-}
33828-
33829 static inline struct sas_port *
33830 mptsas_get_port(struct mptsas_phyinfo *phy_info)
33831 {
33832diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
33833index 0c3ced7..1fe34ec 100644
33834--- a/drivers/message/fusion/mptscsih.c
33835+++ b/drivers/message/fusion/mptscsih.c
33836@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
33837
33838 h = shost_priv(SChost);
33839
33840- if (h) {
33841- if (h->info_kbuf == NULL)
33842- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33843- return h->info_kbuf;
33844- h->info_kbuf[0] = '\0';
33845+ if (!h)
33846+ return NULL;
33847
33848- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33849- h->info_kbuf[size-1] = '\0';
33850- }
33851+ if (h->info_kbuf == NULL)
33852+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33853+ return h->info_kbuf;
33854+ h->info_kbuf[0] = '\0';
33855+
33856+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33857+ h->info_kbuf[size-1] = '\0';
33858
33859 return h->info_kbuf;
33860 }
33861diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
33862index 6d115c7..58ff7fd 100644
33863--- a/drivers/message/i2o/i2o_proc.c
33864+++ b/drivers/message/i2o/i2o_proc.c
33865@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
33866 "Array Controller Device"
33867 };
33868
33869-static char *chtostr(u8 * chars, int n)
33870-{
33871- char tmp[256];
33872- tmp[0] = 0;
33873- return strncat(tmp, (char *)chars, n);
33874-}
33875-
33876 static int i2o_report_query_status(struct seq_file *seq, int block_status,
33877 char *group)
33878 {
33879@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
33880
33881 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
33882 seq_printf(seq, "%-#8x", ddm_table.module_id);
33883- seq_printf(seq, "%-29s",
33884- chtostr(ddm_table.module_name_version, 28));
33885+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
33886 seq_printf(seq, "%9d ", ddm_table.data_size);
33887 seq_printf(seq, "%8d", ddm_table.code_size);
33888
33889@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
33890
33891 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
33892 seq_printf(seq, "%-#8x", dst->module_id);
33893- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
33894- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
33895+ seq_printf(seq, "%-.28s", dst->module_name_version);
33896+ seq_printf(seq, "%-.8s", dst->date);
33897 seq_printf(seq, "%8d ", dst->module_size);
33898 seq_printf(seq, "%8d ", dst->mpb_size);
33899 seq_printf(seq, "0x%04x", dst->module_flags);
33900@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
33901 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
33902 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
33903 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
33904- seq_printf(seq, "Vendor info : %s\n",
33905- chtostr((u8 *) (work32 + 2), 16));
33906- seq_printf(seq, "Product info : %s\n",
33907- chtostr((u8 *) (work32 + 6), 16));
33908- seq_printf(seq, "Description : %s\n",
33909- chtostr((u8 *) (work32 + 10), 16));
33910- seq_printf(seq, "Product rev. : %s\n",
33911- chtostr((u8 *) (work32 + 14), 8));
33912+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
33913+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
33914+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
33915+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
33916
33917 seq_printf(seq, "Serial number : ");
33918 print_serial_number(seq, (u8 *) (work32 + 16),
33919@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
33920 }
33921
33922 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
33923- seq_printf(seq, "Module name : %s\n",
33924- chtostr(result.module_name, 24));
33925- seq_printf(seq, "Module revision : %s\n",
33926- chtostr(result.module_rev, 8));
33927+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
33928+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
33929
33930 seq_printf(seq, "Serial number : ");
33931 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
33932@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
33933 return 0;
33934 }
33935
33936- seq_printf(seq, "Device name : %s\n",
33937- chtostr(result.device_name, 64));
33938- seq_printf(seq, "Service name : %s\n",
33939- chtostr(result.service_name, 64));
33940- seq_printf(seq, "Physical name : %s\n",
33941- chtostr(result.physical_location, 64));
33942- seq_printf(seq, "Instance number : %s\n",
33943- chtostr(result.instance_number, 4));
33944+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
33945+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
33946+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
33947+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
33948
33949 return 0;
33950 }
33951diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
33952index a8c08f3..155fe3d 100644
33953--- a/drivers/message/i2o/iop.c
33954+++ b/drivers/message/i2o/iop.c
33955@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
33956
33957 spin_lock_irqsave(&c->context_list_lock, flags);
33958
33959- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
33960- atomic_inc(&c->context_list_counter);
33961+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
33962+ atomic_inc_unchecked(&c->context_list_counter);
33963
33964- entry->context = atomic_read(&c->context_list_counter);
33965+ entry->context = atomic_read_unchecked(&c->context_list_counter);
33966
33967 list_add(&entry->list, &c->context_list);
33968
33969@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
33970
33971 #if BITS_PER_LONG == 64
33972 spin_lock_init(&c->context_list_lock);
33973- atomic_set(&c->context_list_counter, 0);
33974+ atomic_set_unchecked(&c->context_list_counter, 0);
33975 INIT_LIST_HEAD(&c->context_list);
33976 #endif
33977
33978diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
33979index 7ce65f4..e66e9bc 100644
33980--- a/drivers/mfd/abx500-core.c
33981+++ b/drivers/mfd/abx500-core.c
33982@@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
33983
33984 struct abx500_device_entry {
33985 struct list_head list;
33986- struct abx500_ops ops;
33987+ abx500_ops_no_const ops;
33988 struct device *dev;
33989 };
33990
33991diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
33992index a9223ed..4127b13 100644
33993--- a/drivers/mfd/janz-cmodio.c
33994+++ b/drivers/mfd/janz-cmodio.c
33995@@ -13,6 +13,7 @@
33996
33997 #include <linux/kernel.h>
33998 #include <linux/module.h>
33999+#include <linux/slab.h>
34000 #include <linux/init.h>
34001 #include <linux/pci.h>
34002 #include <linux/interrupt.h>
34003diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
34004index a981e2a..5ca0c8b 100644
34005--- a/drivers/misc/lis3lv02d/lis3lv02d.c
34006+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
34007@@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
34008 * the lid is closed. This leads to interrupts as soon as a little move
34009 * is done.
34010 */
34011- atomic_inc(&lis3->count);
34012+ atomic_inc_unchecked(&lis3->count);
34013
34014 wake_up_interruptible(&lis3->misc_wait);
34015 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
34016@@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
34017 if (lis3->pm_dev)
34018 pm_runtime_get_sync(lis3->pm_dev);
34019
34020- atomic_set(&lis3->count, 0);
34021+ atomic_set_unchecked(&lis3->count, 0);
34022 return 0;
34023 }
34024
34025@@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
34026 add_wait_queue(&lis3->misc_wait, &wait);
34027 while (true) {
34028 set_current_state(TASK_INTERRUPTIBLE);
34029- data = atomic_xchg(&lis3->count, 0);
34030+ data = atomic_xchg_unchecked(&lis3->count, 0);
34031 if (data)
34032 break;
34033
34034@@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34035 struct lis3lv02d, miscdev);
34036
34037 poll_wait(file, &lis3->misc_wait, wait);
34038- if (atomic_read(&lis3->count))
34039+ if (atomic_read_unchecked(&lis3->count))
34040 return POLLIN | POLLRDNORM;
34041 return 0;
34042 }
34043diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
34044index 2b1482a..5d33616 100644
34045--- a/drivers/misc/lis3lv02d/lis3lv02d.h
34046+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
34047@@ -266,7 +266,7 @@ struct lis3lv02d {
34048 struct input_polled_dev *idev; /* input device */
34049 struct platform_device *pdev; /* platform device */
34050 struct regulator_bulk_data regulators[2];
34051- atomic_t count; /* interrupt count after last read */
34052+ atomic_unchecked_t count; /* interrupt count after last read */
34053 union axis_conversion ac; /* hw -> logical axis */
34054 int mapped_btns[3];
34055
34056diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
34057index 2f30bad..c4c13d0 100644
34058--- a/drivers/misc/sgi-gru/gruhandles.c
34059+++ b/drivers/misc/sgi-gru/gruhandles.c
34060@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
34061 unsigned long nsec;
34062
34063 nsec = CLKS2NSEC(clks);
34064- atomic_long_inc(&mcs_op_statistics[op].count);
34065- atomic_long_add(nsec, &mcs_op_statistics[op].total);
34066+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
34067+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
34068 if (mcs_op_statistics[op].max < nsec)
34069 mcs_op_statistics[op].max = nsec;
34070 }
34071diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
34072index 950dbe9..eeef0f8 100644
34073--- a/drivers/misc/sgi-gru/gruprocfs.c
34074+++ b/drivers/misc/sgi-gru/gruprocfs.c
34075@@ -32,9 +32,9 @@
34076
34077 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
34078
34079-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
34080+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
34081 {
34082- unsigned long val = atomic_long_read(v);
34083+ unsigned long val = atomic_long_read_unchecked(v);
34084
34085 seq_printf(s, "%16lu %s\n", val, id);
34086 }
34087@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
34088
34089 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
34090 for (op = 0; op < mcsop_last; op++) {
34091- count = atomic_long_read(&mcs_op_statistics[op].count);
34092- total = atomic_long_read(&mcs_op_statistics[op].total);
34093+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
34094+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
34095 max = mcs_op_statistics[op].max;
34096 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
34097 count ? total / count : 0, max);
34098diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
34099index 5c3ce24..4915ccb 100644
34100--- a/drivers/misc/sgi-gru/grutables.h
34101+++ b/drivers/misc/sgi-gru/grutables.h
34102@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
34103 * GRU statistics.
34104 */
34105 struct gru_stats_s {
34106- atomic_long_t vdata_alloc;
34107- atomic_long_t vdata_free;
34108- atomic_long_t gts_alloc;
34109- atomic_long_t gts_free;
34110- atomic_long_t gms_alloc;
34111- atomic_long_t gms_free;
34112- atomic_long_t gts_double_allocate;
34113- atomic_long_t assign_context;
34114- atomic_long_t assign_context_failed;
34115- atomic_long_t free_context;
34116- atomic_long_t load_user_context;
34117- atomic_long_t load_kernel_context;
34118- atomic_long_t lock_kernel_context;
34119- atomic_long_t unlock_kernel_context;
34120- atomic_long_t steal_user_context;
34121- atomic_long_t steal_kernel_context;
34122- atomic_long_t steal_context_failed;
34123- atomic_long_t nopfn;
34124- atomic_long_t asid_new;
34125- atomic_long_t asid_next;
34126- atomic_long_t asid_wrap;
34127- atomic_long_t asid_reuse;
34128- atomic_long_t intr;
34129- atomic_long_t intr_cbr;
34130- atomic_long_t intr_tfh;
34131- atomic_long_t intr_spurious;
34132- atomic_long_t intr_mm_lock_failed;
34133- atomic_long_t call_os;
34134- atomic_long_t call_os_wait_queue;
34135- atomic_long_t user_flush_tlb;
34136- atomic_long_t user_unload_context;
34137- atomic_long_t user_exception;
34138- atomic_long_t set_context_option;
34139- atomic_long_t check_context_retarget_intr;
34140- atomic_long_t check_context_unload;
34141- atomic_long_t tlb_dropin;
34142- atomic_long_t tlb_preload_page;
34143- atomic_long_t tlb_dropin_fail_no_asid;
34144- atomic_long_t tlb_dropin_fail_upm;
34145- atomic_long_t tlb_dropin_fail_invalid;
34146- atomic_long_t tlb_dropin_fail_range_active;
34147- atomic_long_t tlb_dropin_fail_idle;
34148- atomic_long_t tlb_dropin_fail_fmm;
34149- atomic_long_t tlb_dropin_fail_no_exception;
34150- atomic_long_t tfh_stale_on_fault;
34151- atomic_long_t mmu_invalidate_range;
34152- atomic_long_t mmu_invalidate_page;
34153- atomic_long_t flush_tlb;
34154- atomic_long_t flush_tlb_gru;
34155- atomic_long_t flush_tlb_gru_tgh;
34156- atomic_long_t flush_tlb_gru_zero_asid;
34157+ atomic_long_unchecked_t vdata_alloc;
34158+ atomic_long_unchecked_t vdata_free;
34159+ atomic_long_unchecked_t gts_alloc;
34160+ atomic_long_unchecked_t gts_free;
34161+ atomic_long_unchecked_t gms_alloc;
34162+ atomic_long_unchecked_t gms_free;
34163+ atomic_long_unchecked_t gts_double_allocate;
34164+ atomic_long_unchecked_t assign_context;
34165+ atomic_long_unchecked_t assign_context_failed;
34166+ atomic_long_unchecked_t free_context;
34167+ atomic_long_unchecked_t load_user_context;
34168+ atomic_long_unchecked_t load_kernel_context;
34169+ atomic_long_unchecked_t lock_kernel_context;
34170+ atomic_long_unchecked_t unlock_kernel_context;
34171+ atomic_long_unchecked_t steal_user_context;
34172+ atomic_long_unchecked_t steal_kernel_context;
34173+ atomic_long_unchecked_t steal_context_failed;
34174+ atomic_long_unchecked_t nopfn;
34175+ atomic_long_unchecked_t asid_new;
34176+ atomic_long_unchecked_t asid_next;
34177+ atomic_long_unchecked_t asid_wrap;
34178+ atomic_long_unchecked_t asid_reuse;
34179+ atomic_long_unchecked_t intr;
34180+ atomic_long_unchecked_t intr_cbr;
34181+ atomic_long_unchecked_t intr_tfh;
34182+ atomic_long_unchecked_t intr_spurious;
34183+ atomic_long_unchecked_t intr_mm_lock_failed;
34184+ atomic_long_unchecked_t call_os;
34185+ atomic_long_unchecked_t call_os_wait_queue;
34186+ atomic_long_unchecked_t user_flush_tlb;
34187+ atomic_long_unchecked_t user_unload_context;
34188+ atomic_long_unchecked_t user_exception;
34189+ atomic_long_unchecked_t set_context_option;
34190+ atomic_long_unchecked_t check_context_retarget_intr;
34191+ atomic_long_unchecked_t check_context_unload;
34192+ atomic_long_unchecked_t tlb_dropin;
34193+ atomic_long_unchecked_t tlb_preload_page;
34194+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
34195+ atomic_long_unchecked_t tlb_dropin_fail_upm;
34196+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
34197+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
34198+ atomic_long_unchecked_t tlb_dropin_fail_idle;
34199+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
34200+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
34201+ atomic_long_unchecked_t tfh_stale_on_fault;
34202+ atomic_long_unchecked_t mmu_invalidate_range;
34203+ atomic_long_unchecked_t mmu_invalidate_page;
34204+ atomic_long_unchecked_t flush_tlb;
34205+ atomic_long_unchecked_t flush_tlb_gru;
34206+ atomic_long_unchecked_t flush_tlb_gru_tgh;
34207+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
34208
34209- atomic_long_t copy_gpa;
34210- atomic_long_t read_gpa;
34211+ atomic_long_unchecked_t copy_gpa;
34212+ atomic_long_unchecked_t read_gpa;
34213
34214- atomic_long_t mesq_receive;
34215- atomic_long_t mesq_receive_none;
34216- atomic_long_t mesq_send;
34217- atomic_long_t mesq_send_failed;
34218- atomic_long_t mesq_noop;
34219- atomic_long_t mesq_send_unexpected_error;
34220- atomic_long_t mesq_send_lb_overflow;
34221- atomic_long_t mesq_send_qlimit_reached;
34222- atomic_long_t mesq_send_amo_nacked;
34223- atomic_long_t mesq_send_put_nacked;
34224- atomic_long_t mesq_page_overflow;
34225- atomic_long_t mesq_qf_locked;
34226- atomic_long_t mesq_qf_noop_not_full;
34227- atomic_long_t mesq_qf_switch_head_failed;
34228- atomic_long_t mesq_qf_unexpected_error;
34229- atomic_long_t mesq_noop_unexpected_error;
34230- atomic_long_t mesq_noop_lb_overflow;
34231- atomic_long_t mesq_noop_qlimit_reached;
34232- atomic_long_t mesq_noop_amo_nacked;
34233- atomic_long_t mesq_noop_put_nacked;
34234- atomic_long_t mesq_noop_page_overflow;
34235+ atomic_long_unchecked_t mesq_receive;
34236+ atomic_long_unchecked_t mesq_receive_none;
34237+ atomic_long_unchecked_t mesq_send;
34238+ atomic_long_unchecked_t mesq_send_failed;
34239+ atomic_long_unchecked_t mesq_noop;
34240+ atomic_long_unchecked_t mesq_send_unexpected_error;
34241+ atomic_long_unchecked_t mesq_send_lb_overflow;
34242+ atomic_long_unchecked_t mesq_send_qlimit_reached;
34243+ atomic_long_unchecked_t mesq_send_amo_nacked;
34244+ atomic_long_unchecked_t mesq_send_put_nacked;
34245+ atomic_long_unchecked_t mesq_page_overflow;
34246+ atomic_long_unchecked_t mesq_qf_locked;
34247+ atomic_long_unchecked_t mesq_qf_noop_not_full;
34248+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
34249+ atomic_long_unchecked_t mesq_qf_unexpected_error;
34250+ atomic_long_unchecked_t mesq_noop_unexpected_error;
34251+ atomic_long_unchecked_t mesq_noop_lb_overflow;
34252+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
34253+ atomic_long_unchecked_t mesq_noop_amo_nacked;
34254+ atomic_long_unchecked_t mesq_noop_put_nacked;
34255+ atomic_long_unchecked_t mesq_noop_page_overflow;
34256
34257 };
34258
34259@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
34260 tghop_invalidate, mcsop_last};
34261
34262 struct mcs_op_statistic {
34263- atomic_long_t count;
34264- atomic_long_t total;
34265+ atomic_long_unchecked_t count;
34266+ atomic_long_unchecked_t total;
34267 unsigned long max;
34268 };
34269
34270@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
34271
34272 #define STAT(id) do { \
34273 if (gru_options & OPT_STATS) \
34274- atomic_long_inc(&gru_stats.id); \
34275+ atomic_long_inc_unchecked(&gru_stats.id); \
34276 } while (0)
34277
34278 #ifdef CONFIG_SGI_GRU_DEBUG
34279diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
34280index c862cd4..0d176fe 100644
34281--- a/drivers/misc/sgi-xp/xp.h
34282+++ b/drivers/misc/sgi-xp/xp.h
34283@@ -288,7 +288,7 @@ struct xpc_interface {
34284 xpc_notify_func, void *);
34285 void (*received) (short, int, void *);
34286 enum xp_retval (*partid_to_nasids) (short, void *);
34287-};
34288+} __no_const;
34289
34290 extern struct xpc_interface xpc_interface;
34291
34292diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
34293index b94d5f7..7f494c5 100644
34294--- a/drivers/misc/sgi-xp/xpc.h
34295+++ b/drivers/misc/sgi-xp/xpc.h
34296@@ -835,6 +835,7 @@ struct xpc_arch_operations {
34297 void (*received_payload) (struct xpc_channel *, void *);
34298 void (*notify_senders_of_disconnect) (struct xpc_channel *);
34299 };
34300+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
34301
34302 /* struct xpc_partition act_state values (for XPC HB) */
34303
34304@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
34305 /* found in xpc_main.c */
34306 extern struct device *xpc_part;
34307 extern struct device *xpc_chan;
34308-extern struct xpc_arch_operations xpc_arch_ops;
34309+extern xpc_arch_operations_no_const xpc_arch_ops;
34310 extern int xpc_disengage_timelimit;
34311 extern int xpc_disengage_timedout;
34312 extern int xpc_activate_IRQ_rcvd;
34313diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
34314index 8d082b4..aa749ae 100644
34315--- a/drivers/misc/sgi-xp/xpc_main.c
34316+++ b/drivers/misc/sgi-xp/xpc_main.c
34317@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
34318 .notifier_call = xpc_system_die,
34319 };
34320
34321-struct xpc_arch_operations xpc_arch_ops;
34322+xpc_arch_operations_no_const xpc_arch_ops;
34323
34324 /*
34325 * Timer function to enforce the timelimit on the partition disengage.
34326diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
34327index 69ef0be..f3ef91e 100644
34328--- a/drivers/mmc/host/sdhci-pci.c
34329+++ b/drivers/mmc/host/sdhci-pci.c
34330@@ -652,7 +652,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
34331 .probe = via_probe,
34332 };
34333
34334-static const struct pci_device_id pci_ids[] __devinitdata = {
34335+static const struct pci_device_id pci_ids[] __devinitconst = {
34336 {
34337 .vendor = PCI_VENDOR_ID_RICOH,
34338 .device = PCI_DEVICE_ID_RICOH_R5C822,
34339diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
34340index a4eb8b5..8c0628f 100644
34341--- a/drivers/mtd/devices/doc2000.c
34342+++ b/drivers/mtd/devices/doc2000.c
34343@@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
34344
34345 /* The ECC will not be calculated correctly if less than 512 is written */
34346 /* DBB-
34347- if (len != 0x200 && eccbuf)
34348+ if (len != 0x200)
34349 printk(KERN_WARNING
34350 "ECC needs a full sector write (adr: %lx size %lx)\n",
34351 (long) to, (long) len);
34352diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
34353index a9e57d6..c6d8731 100644
34354--- a/drivers/mtd/nand/denali.c
34355+++ b/drivers/mtd/nand/denali.c
34356@@ -26,6 +26,7 @@
34357 #include <linux/pci.h>
34358 #include <linux/mtd/mtd.h>
34359 #include <linux/module.h>
34360+#include <linux/slab.h>
34361
34362 #include "denali.h"
34363
34364diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
34365index 51b9d6a..52af9a7 100644
34366--- a/drivers/mtd/nftlmount.c
34367+++ b/drivers/mtd/nftlmount.c
34368@@ -24,6 +24,7 @@
34369 #include <asm/errno.h>
34370 #include <linux/delay.h>
34371 #include <linux/slab.h>
34372+#include <linux/sched.h>
34373 #include <linux/mtd/mtd.h>
34374 #include <linux/mtd/nand.h>
34375 #include <linux/mtd/nftl.h>
34376diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
34377index 6762dc4..9956862 100644
34378--- a/drivers/net/ethernet/atheros/atlx/atl2.c
34379+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
34380@@ -2859,7 +2859,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
34381 */
34382
34383 #define ATL2_PARAM(X, desc) \
34384- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34385+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34386 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
34387 MODULE_PARM_DESC(X, desc);
34388 #else
34389diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34390index 61a7670..7da6e34 100644
34391--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34392+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34393@@ -483,7 +483,7 @@ struct bnx2x_rx_mode_obj {
34394
34395 int (*wait_comp)(struct bnx2x *bp,
34396 struct bnx2x_rx_mode_ramrod_params *p);
34397-};
34398+} __no_const;
34399
34400 /********************** Set multicast group ***********************************/
34401
34402diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
34403index 93865f8..5448741 100644
34404--- a/drivers/net/ethernet/broadcom/tg3.h
34405+++ b/drivers/net/ethernet/broadcom/tg3.h
34406@@ -140,6 +140,7 @@
34407 #define CHIPREV_ID_5750_A0 0x4000
34408 #define CHIPREV_ID_5750_A1 0x4001
34409 #define CHIPREV_ID_5750_A3 0x4003
34410+#define CHIPREV_ID_5750_C1 0x4201
34411 #define CHIPREV_ID_5750_C2 0x4202
34412 #define CHIPREV_ID_5752_A0_HW 0x5000
34413 #define CHIPREV_ID_5752_A0 0x6000
34414diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34415index c4e8643..0979484 100644
34416--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34417+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34418@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
34419 */
34420 struct l2t_skb_cb {
34421 arp_failure_handler_func arp_failure_handler;
34422-};
34423+} __no_const;
34424
34425 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
34426
34427diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
34428index 18b106c..2b38d36 100644
34429--- a/drivers/net/ethernet/dec/tulip/de4x5.c
34430+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
34431@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34432 for (i=0; i<ETH_ALEN; i++) {
34433 tmp.addr[i] = dev->dev_addr[i];
34434 }
34435- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34436+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34437 break;
34438
34439 case DE4X5_SET_HWADDR: /* Set the hardware address */
34440@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34441 spin_lock_irqsave(&lp->lock, flags);
34442 memcpy(&statbuf, &lp->pktStats, ioc->len);
34443 spin_unlock_irqrestore(&lp->lock, flags);
34444- if (copy_to_user(ioc->data, &statbuf, ioc->len))
34445+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34446 return -EFAULT;
34447 break;
34448 }
34449diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
34450index ed7d1dc..d426748 100644
34451--- a/drivers/net/ethernet/dec/tulip/eeprom.c
34452+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
34453@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
34454 {NULL}};
34455
34456
34457-static const char *block_name[] __devinitdata = {
34458+static const char *block_name[] __devinitconst = {
34459 "21140 non-MII",
34460 "21140 MII PHY",
34461 "21142 Serial PHY",
34462diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
34463index 2ac6fff..2d127d0 100644
34464--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
34465+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
34466@@ -236,7 +236,7 @@ struct pci_id_info {
34467 int drv_flags; /* Driver use, intended as capability flags. */
34468 };
34469
34470-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34471+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34472 { /* Sometime a Level-One switch card. */
34473 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
34474 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
34475diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
34476index d783f4f..97fa1b0 100644
34477--- a/drivers/net/ethernet/dlink/sundance.c
34478+++ b/drivers/net/ethernet/dlink/sundance.c
34479@@ -218,7 +218,7 @@ enum {
34480 struct pci_id_info {
34481 const char *name;
34482 };
34483-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34484+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34485 {"D-Link DFE-550TX FAST Ethernet Adapter"},
34486 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
34487 {"D-Link DFE-580TX 4 port Server Adapter"},
34488diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
34489index 528a886..e6a98a3 100644
34490--- a/drivers/net/ethernet/emulex/benet/be_main.c
34491+++ b/drivers/net/ethernet/emulex/benet/be_main.c
34492@@ -403,7 +403,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
34493
34494 if (wrapped)
34495 newacc += 65536;
34496- ACCESS_ONCE(*acc) = newacc;
34497+ ACCESS_ONCE_RW(*acc) = newacc;
34498 }
34499
34500 void be_parse_stats(struct be_adapter *adapter)
34501diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
34502index 16b0704..d2c07d7 100644
34503--- a/drivers/net/ethernet/faraday/ftgmac100.c
34504+++ b/drivers/net/ethernet/faraday/ftgmac100.c
34505@@ -31,6 +31,8 @@
34506 #include <linux/netdevice.h>
34507 #include <linux/phy.h>
34508 #include <linux/platform_device.h>
34509+#include <linux/interrupt.h>
34510+#include <linux/irqreturn.h>
34511 #include <net/ip.h>
34512
34513 #include "ftgmac100.h"
34514diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
34515index 829b109..4ae5f6a 100644
34516--- a/drivers/net/ethernet/faraday/ftmac100.c
34517+++ b/drivers/net/ethernet/faraday/ftmac100.c
34518@@ -31,6 +31,8 @@
34519 #include <linux/module.h>
34520 #include <linux/netdevice.h>
34521 #include <linux/platform_device.h>
34522+#include <linux/interrupt.h>
34523+#include <linux/irqreturn.h>
34524
34525 #include "ftmac100.h"
34526
34527diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
34528index 1637b98..c42f87b 100644
34529--- a/drivers/net/ethernet/fealnx.c
34530+++ b/drivers/net/ethernet/fealnx.c
34531@@ -150,7 +150,7 @@ struct chip_info {
34532 int flags;
34533 };
34534
34535-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
34536+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
34537 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34538 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
34539 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34540diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
34541index b83897f..b2d970f 100644
34542--- a/drivers/net/ethernet/intel/e1000e/e1000.h
34543+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
34544@@ -181,7 +181,7 @@ struct e1000_info;
34545 #define E1000_TXDCTL_DMA_BURST_ENABLE \
34546 (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
34547 E1000_TXDCTL_COUNT_DESC | \
34548- (5 << 16) | /* wthresh must be +1 more than desired */\
34549+ (1 << 16) | /* wthresh must be +1 more than desired */\
34550 (1 << 8) | /* hthresh */ \
34551 0x1f) /* pthresh */
34552
34553diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
34554index f82ecf5..7d59ecb 100644
34555--- a/drivers/net/ethernet/intel/e1000e/hw.h
34556+++ b/drivers/net/ethernet/intel/e1000e/hw.h
34557@@ -784,6 +784,7 @@ struct e1000_mac_operations {
34558 void (*config_collision_dist)(struct e1000_hw *);
34559 s32 (*read_mac_addr)(struct e1000_hw *);
34560 };
34561+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34562
34563 /*
34564 * When to use various PHY register access functions:
34565@@ -824,6 +825,7 @@ struct e1000_phy_operations {
34566 void (*power_up)(struct e1000_hw *);
34567 void (*power_down)(struct e1000_hw *);
34568 };
34569+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34570
34571 /* Function pointers for the NVM. */
34572 struct e1000_nvm_operations {
34573@@ -836,9 +838,10 @@ struct e1000_nvm_operations {
34574 s32 (*validate)(struct e1000_hw *);
34575 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
34576 };
34577+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34578
34579 struct e1000_mac_info {
34580- struct e1000_mac_operations ops;
34581+ e1000_mac_operations_no_const ops;
34582 u8 addr[ETH_ALEN];
34583 u8 perm_addr[ETH_ALEN];
34584
34585@@ -879,7 +882,7 @@ struct e1000_mac_info {
34586 };
34587
34588 struct e1000_phy_info {
34589- struct e1000_phy_operations ops;
34590+ e1000_phy_operations_no_const ops;
34591
34592 enum e1000_phy_type type;
34593
34594@@ -913,7 +916,7 @@ struct e1000_phy_info {
34595 };
34596
34597 struct e1000_nvm_info {
34598- struct e1000_nvm_operations ops;
34599+ e1000_nvm_operations_no_const ops;
34600
34601 enum e1000_nvm_type type;
34602 enum e1000_nvm_override override;
34603diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
34604index f67cbd3..cef9e3d 100644
34605--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
34606+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
34607@@ -314,6 +314,7 @@ struct e1000_mac_operations {
34608 s32 (*read_mac_addr)(struct e1000_hw *);
34609 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
34610 };
34611+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34612
34613 struct e1000_phy_operations {
34614 s32 (*acquire)(struct e1000_hw *);
34615@@ -330,6 +331,7 @@ struct e1000_phy_operations {
34616 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
34617 s32 (*write_reg)(struct e1000_hw *, u32, u16);
34618 };
34619+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34620
34621 struct e1000_nvm_operations {
34622 s32 (*acquire)(struct e1000_hw *);
34623@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
34624 s32 (*update)(struct e1000_hw *);
34625 s32 (*validate)(struct e1000_hw *);
34626 };
34627+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34628
34629 struct e1000_info {
34630 s32 (*get_invariants)(struct e1000_hw *);
34631@@ -350,7 +353,7 @@ struct e1000_info {
34632 extern const struct e1000_info e1000_82575_info;
34633
34634 struct e1000_mac_info {
34635- struct e1000_mac_operations ops;
34636+ e1000_mac_operations_no_const ops;
34637
34638 u8 addr[6];
34639 u8 perm_addr[6];
34640@@ -388,7 +391,7 @@ struct e1000_mac_info {
34641 };
34642
34643 struct e1000_phy_info {
34644- struct e1000_phy_operations ops;
34645+ e1000_phy_operations_no_const ops;
34646
34647 enum e1000_phy_type type;
34648
34649@@ -423,7 +426,7 @@ struct e1000_phy_info {
34650 };
34651
34652 struct e1000_nvm_info {
34653- struct e1000_nvm_operations ops;
34654+ e1000_nvm_operations_no_const ops;
34655 enum e1000_nvm_type type;
34656 enum e1000_nvm_override override;
34657
34658@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
34659 s32 (*check_for_ack)(struct e1000_hw *, u16);
34660 s32 (*check_for_rst)(struct e1000_hw *, u16);
34661 };
34662+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34663
34664 struct e1000_mbx_stats {
34665 u32 msgs_tx;
34666@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
34667 };
34668
34669 struct e1000_mbx_info {
34670- struct e1000_mbx_operations ops;
34671+ e1000_mbx_operations_no_const ops;
34672 struct e1000_mbx_stats stats;
34673 u32 timeout;
34674 u32 usec_delay;
34675diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
34676index 57db3c6..aa825fc 100644
34677--- a/drivers/net/ethernet/intel/igbvf/vf.h
34678+++ b/drivers/net/ethernet/intel/igbvf/vf.h
34679@@ -189,9 +189,10 @@ struct e1000_mac_operations {
34680 s32 (*read_mac_addr)(struct e1000_hw *);
34681 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
34682 };
34683+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34684
34685 struct e1000_mac_info {
34686- struct e1000_mac_operations ops;
34687+ e1000_mac_operations_no_const ops;
34688 u8 addr[6];
34689 u8 perm_addr[6];
34690
34691@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
34692 s32 (*check_for_ack)(struct e1000_hw *);
34693 s32 (*check_for_rst)(struct e1000_hw *);
34694 };
34695+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34696
34697 struct e1000_mbx_stats {
34698 u32 msgs_tx;
34699@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
34700 };
34701
34702 struct e1000_mbx_info {
34703- struct e1000_mbx_operations ops;
34704+ e1000_mbx_operations_no_const ops;
34705 struct e1000_mbx_stats stats;
34706 u32 timeout;
34707 u32 usec_delay;
34708diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34709index 8636e83..ab9bbc3 100644
34710--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34711+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34712@@ -2710,6 +2710,7 @@ struct ixgbe_eeprom_operations {
34713 s32 (*update_checksum)(struct ixgbe_hw *);
34714 u16 (*calc_checksum)(struct ixgbe_hw *);
34715 };
34716+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
34717
34718 struct ixgbe_mac_operations {
34719 s32 (*init_hw)(struct ixgbe_hw *);
34720@@ -2773,6 +2774,7 @@ struct ixgbe_mac_operations {
34721 /* Manageability interface */
34722 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
34723 };
34724+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34725
34726 struct ixgbe_phy_operations {
34727 s32 (*identify)(struct ixgbe_hw *);
34728@@ -2792,9 +2794,10 @@ struct ixgbe_phy_operations {
34729 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
34730 s32 (*check_overtemp)(struct ixgbe_hw *);
34731 };
34732+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
34733
34734 struct ixgbe_eeprom_info {
34735- struct ixgbe_eeprom_operations ops;
34736+ ixgbe_eeprom_operations_no_const ops;
34737 enum ixgbe_eeprom_type type;
34738 u32 semaphore_delay;
34739 u16 word_size;
34740@@ -2804,7 +2807,7 @@ struct ixgbe_eeprom_info {
34741
34742 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
34743 struct ixgbe_mac_info {
34744- struct ixgbe_mac_operations ops;
34745+ ixgbe_mac_operations_no_const ops;
34746 enum ixgbe_mac_type type;
34747 u8 addr[ETH_ALEN];
34748 u8 perm_addr[ETH_ALEN];
34749@@ -2832,7 +2835,7 @@ struct ixgbe_mac_info {
34750 };
34751
34752 struct ixgbe_phy_info {
34753- struct ixgbe_phy_operations ops;
34754+ ixgbe_phy_operations_no_const ops;
34755 struct mdio_if_info mdio;
34756 enum ixgbe_phy_type type;
34757 u32 id;
34758@@ -2860,6 +2863,7 @@ struct ixgbe_mbx_operations {
34759 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
34760 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
34761 };
34762+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
34763
34764 struct ixgbe_mbx_stats {
34765 u32 msgs_tx;
34766@@ -2871,7 +2875,7 @@ struct ixgbe_mbx_stats {
34767 };
34768
34769 struct ixgbe_mbx_info {
34770- struct ixgbe_mbx_operations ops;
34771+ ixgbe_mbx_operations_no_const ops;
34772 struct ixgbe_mbx_stats stats;
34773 u32 timeout;
34774 u32 usec_delay;
34775diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
34776index 25c951d..cc7cf33 100644
34777--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
34778+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
34779@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
34780 s32 (*clear_vfta)(struct ixgbe_hw *);
34781 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
34782 };
34783+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34784
34785 enum ixgbe_mac_type {
34786 ixgbe_mac_unknown = 0,
34787@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
34788 };
34789
34790 struct ixgbe_mac_info {
34791- struct ixgbe_mac_operations ops;
34792+ ixgbe_mac_operations_no_const ops;
34793 u8 addr[6];
34794 u8 perm_addr[6];
34795
34796@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
34797 s32 (*check_for_ack)(struct ixgbe_hw *);
34798 s32 (*check_for_rst)(struct ixgbe_hw *);
34799 };
34800+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
34801
34802 struct ixgbe_mbx_stats {
34803 u32 msgs_tx;
34804@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
34805 };
34806
34807 struct ixgbe_mbx_info {
34808- struct ixgbe_mbx_operations ops;
34809+ ixgbe_mbx_operations_no_const ops;
34810 struct ixgbe_mbx_stats stats;
34811 u32 timeout;
34812 u32 udelay;
34813diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
34814index 8bb05b4..074796f 100644
34815--- a/drivers/net/ethernet/mellanox/mlx4/main.c
34816+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
34817@@ -41,6 +41,7 @@
34818 #include <linux/slab.h>
34819 #include <linux/io-mapping.h>
34820 #include <linux/delay.h>
34821+#include <linux/sched.h>
34822
34823 #include <linux/mlx4/device.h>
34824 #include <linux/mlx4/doorbell.h>
34825diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
34826index 5046a64..71ca936 100644
34827--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
34828+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
34829@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
34830 void (*link_down)(struct __vxge_hw_device *devh);
34831 void (*crit_err)(struct __vxge_hw_device *devh,
34832 enum vxge_hw_event type, u64 ext_data);
34833-};
34834+} __no_const;
34835
34836 /*
34837 * struct __vxge_hw_blockpool_entry - Block private data structure
34838diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34839index 4a518a3..936b334 100644
34840--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34841+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34842@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
34843 struct vxge_hw_mempool_dma *dma_object,
34844 u32 index,
34845 u32 is_last);
34846-};
34847+} __no_const;
34848
34849 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
34850 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
34851diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
34852index ce6b44d..74f10c2 100644
34853--- a/drivers/net/ethernet/realtek/r8169.c
34854+++ b/drivers/net/ethernet/realtek/r8169.c
34855@@ -708,17 +708,17 @@ struct rtl8169_private {
34856 struct mdio_ops {
34857 void (*write)(void __iomem *, int, int);
34858 int (*read)(void __iomem *, int);
34859- } mdio_ops;
34860+ } __no_const mdio_ops;
34861
34862 struct pll_power_ops {
34863 void (*down)(struct rtl8169_private *);
34864 void (*up)(struct rtl8169_private *);
34865- } pll_power_ops;
34866+ } __no_const pll_power_ops;
34867
34868 struct jumbo_ops {
34869 void (*enable)(struct rtl8169_private *);
34870 void (*disable)(struct rtl8169_private *);
34871- } jumbo_ops;
34872+ } __no_const jumbo_ops;
34873
34874 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
34875 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
34876diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
34877index a9deda8..5507c31 100644
34878--- a/drivers/net/ethernet/sis/sis190.c
34879+++ b/drivers/net/ethernet/sis/sis190.c
34880@@ -1620,7 +1620,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
34881 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
34882 struct net_device *dev)
34883 {
34884- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
34885+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
34886 struct sis190_private *tp = netdev_priv(dev);
34887 struct pci_dev *isa_bridge;
34888 u8 reg, tmp8;
34889diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34890index c07cfe9..81cbf7e 100644
34891--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34892+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34893@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
34894
34895 writel(value, ioaddr + MMC_CNTRL);
34896
34897- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
34898- MMC_CNTRL, value);
34899+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
34900+// MMC_CNTRL, value);
34901 }
34902
34903 /* To mask all all interrupts.*/
34904diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
34905index 48d56da..a27e46c 100644
34906--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
34907+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
34908@@ -1584,7 +1584,7 @@ static const struct file_operations stmmac_rings_status_fops = {
34909 .open = stmmac_sysfs_ring_open,
34910 .read = seq_read,
34911 .llseek = seq_lseek,
34912- .release = seq_release,
34913+ .release = single_release,
34914 };
34915
34916 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
34917@@ -1656,7 +1656,7 @@ static const struct file_operations stmmac_dma_cap_fops = {
34918 .open = stmmac_sysfs_dma_cap_open,
34919 .read = seq_read,
34920 .llseek = seq_lseek,
34921- .release = seq_release,
34922+ .release = single_release,
34923 };
34924
34925 static int stmmac_init_fs(struct net_device *dev)
34926diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
34927index c358245..8c1de63 100644
34928--- a/drivers/net/hyperv/hyperv_net.h
34929+++ b/drivers/net/hyperv/hyperv_net.h
34930@@ -98,7 +98,7 @@ struct rndis_device {
34931
34932 enum rndis_device_state state;
34933 bool link_state;
34934- atomic_t new_req_id;
34935+ atomic_unchecked_t new_req_id;
34936
34937 spinlock_t request_lock;
34938 struct list_head req_list;
34939diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
34940index d6be64b..5d97e3b 100644
34941--- a/drivers/net/hyperv/rndis_filter.c
34942+++ b/drivers/net/hyperv/rndis_filter.c
34943@@ -97,7 +97,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
34944 * template
34945 */
34946 set = &rndis_msg->msg.set_req;
34947- set->req_id = atomic_inc_return(&dev->new_req_id);
34948+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34949
34950 /* Add to the request list */
34951 spin_lock_irqsave(&dev->request_lock, flags);
34952@@ -648,7 +648,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
34953
34954 /* Setup the rndis set */
34955 halt = &request->request_msg.msg.halt_req;
34956- halt->req_id = atomic_inc_return(&dev->new_req_id);
34957+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34958
34959 /* Ignore return since this msg is optional. */
34960 rndis_filter_send_request(dev, request);
34961diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
34962index cb8fd50..003ec38 100644
34963--- a/drivers/net/macvtap.c
34964+++ b/drivers/net/macvtap.c
34965@@ -528,6 +528,8 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
34966 }
34967 base = (unsigned long)from->iov_base + offset1;
34968 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
34969+ if (i + size >= MAX_SKB_FRAGS)
34970+ return -EFAULT;
34971 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
34972 if ((num_pages != size) ||
34973 (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
34974diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
34975index 21d7151..8034208 100644
34976--- a/drivers/net/ppp/ppp_generic.c
34977+++ b/drivers/net/ppp/ppp_generic.c
34978@@ -986,7 +986,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34979 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
34980 struct ppp_stats stats;
34981 struct ppp_comp_stats cstats;
34982- char *vers;
34983
34984 switch (cmd) {
34985 case SIOCGPPPSTATS:
34986@@ -1008,8 +1007,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34987 break;
34988
34989 case SIOCGPPPVER:
34990- vers = PPP_VERSION;
34991- if (copy_to_user(addr, vers, strlen(vers) + 1))
34992+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
34993 break;
34994 err = 0;
34995 break;
34996diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
34997index b715e6b..6d2490f 100644
34998--- a/drivers/net/tokenring/abyss.c
34999+++ b/drivers/net/tokenring/abyss.c
35000@@ -450,10 +450,12 @@ static struct pci_driver abyss_driver = {
35001
35002 static int __init abyss_init (void)
35003 {
35004- abyss_netdev_ops = tms380tr_netdev_ops;
35005+ pax_open_kernel();
35006+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35007
35008- abyss_netdev_ops.ndo_open = abyss_open;
35009- abyss_netdev_ops.ndo_stop = abyss_close;
35010+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
35011+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
35012+ pax_close_kernel();
35013
35014 return pci_register_driver(&abyss_driver);
35015 }
35016diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
35017index 28adcdf..ae82f35 100644
35018--- a/drivers/net/tokenring/madgemc.c
35019+++ b/drivers/net/tokenring/madgemc.c
35020@@ -742,9 +742,11 @@ static struct mca_driver madgemc_driver = {
35021
35022 static int __init madgemc_init (void)
35023 {
35024- madgemc_netdev_ops = tms380tr_netdev_ops;
35025- madgemc_netdev_ops.ndo_open = madgemc_open;
35026- madgemc_netdev_ops.ndo_stop = madgemc_close;
35027+ pax_open_kernel();
35028+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35029+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
35030+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
35031+ pax_close_kernel();
35032
35033 return mca_register_driver (&madgemc_driver);
35034 }
35035diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
35036index 62d90e4..9d84237 100644
35037--- a/drivers/net/tokenring/proteon.c
35038+++ b/drivers/net/tokenring/proteon.c
35039@@ -352,9 +352,11 @@ static int __init proteon_init(void)
35040 struct platform_device *pdev;
35041 int i, num = 0, err = 0;
35042
35043- proteon_netdev_ops = tms380tr_netdev_ops;
35044- proteon_netdev_ops.ndo_open = proteon_open;
35045- proteon_netdev_ops.ndo_stop = tms380tr_close;
35046+ pax_open_kernel();
35047+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35048+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
35049+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
35050+ pax_close_kernel();
35051
35052 err = platform_driver_register(&proteon_driver);
35053 if (err)
35054diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
35055index ee11e93..c8f19c7 100644
35056--- a/drivers/net/tokenring/skisa.c
35057+++ b/drivers/net/tokenring/skisa.c
35058@@ -362,9 +362,11 @@ static int __init sk_isa_init(void)
35059 struct platform_device *pdev;
35060 int i, num = 0, err = 0;
35061
35062- sk_isa_netdev_ops = tms380tr_netdev_ops;
35063- sk_isa_netdev_ops.ndo_open = sk_isa_open;
35064- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35065+ pax_open_kernel();
35066+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35067+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
35068+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35069+ pax_close_kernel();
35070
35071 err = platform_driver_register(&sk_isa_driver);
35072 if (err)
35073diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
35074index 2d2a688..35f2372 100644
35075--- a/drivers/net/usb/hso.c
35076+++ b/drivers/net/usb/hso.c
35077@@ -71,7 +71,7 @@
35078 #include <asm/byteorder.h>
35079 #include <linux/serial_core.h>
35080 #include <linux/serial.h>
35081-
35082+#include <asm/local.h>
35083
35084 #define MOD_AUTHOR "Option Wireless"
35085 #define MOD_DESCRIPTION "USB High Speed Option driver"
35086@@ -257,7 +257,7 @@ struct hso_serial {
35087
35088 /* from usb_serial_port */
35089 struct tty_struct *tty;
35090- int open_count;
35091+ local_t open_count;
35092 spinlock_t serial_lock;
35093
35094 int (*write_data) (struct hso_serial *serial);
35095@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
35096 struct urb *urb;
35097
35098 urb = serial->rx_urb[0];
35099- if (serial->open_count > 0) {
35100+ if (local_read(&serial->open_count) > 0) {
35101 count = put_rxbuf_data(urb, serial);
35102 if (count == -1)
35103 return;
35104@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
35105 DUMP1(urb->transfer_buffer, urb->actual_length);
35106
35107 /* Anyone listening? */
35108- if (serial->open_count == 0)
35109+ if (local_read(&serial->open_count) == 0)
35110 return;
35111
35112 if (status == 0) {
35113@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35114 spin_unlock_irq(&serial->serial_lock);
35115
35116 /* check for port already opened, if not set the termios */
35117- serial->open_count++;
35118- if (serial->open_count == 1) {
35119+ if (local_inc_return(&serial->open_count) == 1) {
35120 serial->rx_state = RX_IDLE;
35121 /* Force default termio settings */
35122 _hso_serial_set_termios(tty, NULL);
35123@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35124 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35125 if (result) {
35126 hso_stop_serial_device(serial->parent);
35127- serial->open_count--;
35128+ local_dec(&serial->open_count);
35129 kref_put(&serial->parent->ref, hso_serial_ref_free);
35130 }
35131 } else {
35132@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
35133
35134 /* reset the rts and dtr */
35135 /* do the actual close */
35136- serial->open_count--;
35137+ local_dec(&serial->open_count);
35138
35139- if (serial->open_count <= 0) {
35140- serial->open_count = 0;
35141+ if (local_read(&serial->open_count) <= 0) {
35142+ local_set(&serial->open_count, 0);
35143 spin_lock_irq(&serial->serial_lock);
35144 if (serial->tty == tty) {
35145 serial->tty->driver_data = NULL;
35146@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
35147
35148 /* the actual setup */
35149 spin_lock_irqsave(&serial->serial_lock, flags);
35150- if (serial->open_count)
35151+ if (local_read(&serial->open_count))
35152 _hso_serial_set_termios(tty, old);
35153 else
35154 tty->termios = old;
35155@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
35156 D1("Pending read interrupt on port %d\n", i);
35157 spin_lock(&serial->serial_lock);
35158 if (serial->rx_state == RX_IDLE &&
35159- serial->open_count > 0) {
35160+ local_read(&serial->open_count) > 0) {
35161 /* Setup and send a ctrl req read on
35162 * port i */
35163 if (!serial->rx_urb_filled[0]) {
35164@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
35165 /* Start all serial ports */
35166 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35167 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35168- if (dev2ser(serial_table[i])->open_count) {
35169+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
35170 result =
35171 hso_start_serial_device(serial_table[i], GFP_NOIO);
35172 hso_kick_transmit(dev2ser(serial_table[i]));
35173diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
35174index c54b7d37..af1f359 100644
35175--- a/drivers/net/wireless/ath/ath.h
35176+++ b/drivers/net/wireless/ath/ath.h
35177@@ -119,6 +119,7 @@ struct ath_ops {
35178 void (*write_flush) (void *);
35179 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
35180 };
35181+typedef struct ath_ops __no_const ath_ops_no_const;
35182
35183 struct ath_common;
35184 struct ath_bus_ops;
35185diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35186index aa2abaf..5f5152d 100644
35187--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35188+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35189@@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35190 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
35191 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
35192
35193- ACCESS_ONCE(ads->ds_link) = i->link;
35194- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
35195+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
35196+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
35197
35198 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
35199 ctl6 = SM(i->keytype, AR_EncrType);
35200@@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35201
35202 if ((i->is_first || i->is_last) &&
35203 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
35204- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
35205+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
35206 | set11nTries(i->rates, 1)
35207 | set11nTries(i->rates, 2)
35208 | set11nTries(i->rates, 3)
35209 | (i->dur_update ? AR_DurUpdateEna : 0)
35210 | SM(0, AR_BurstDur);
35211
35212- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
35213+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
35214 | set11nRate(i->rates, 1)
35215 | set11nRate(i->rates, 2)
35216 | set11nRate(i->rates, 3);
35217 } else {
35218- ACCESS_ONCE(ads->ds_ctl2) = 0;
35219- ACCESS_ONCE(ads->ds_ctl3) = 0;
35220+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
35221+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
35222 }
35223
35224 if (!i->is_first) {
35225- ACCESS_ONCE(ads->ds_ctl0) = 0;
35226- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35227- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35228+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
35229+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35230+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35231 return;
35232 }
35233
35234@@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35235 break;
35236 }
35237
35238- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35239+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35240 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35241 | SM(i->txpower, AR_XmitPower)
35242 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35243@@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35244 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
35245 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
35246
35247- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35248- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35249+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35250+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35251
35252 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
35253 return;
35254
35255- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35256+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35257 | set11nPktDurRTSCTS(i->rates, 1);
35258
35259- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35260+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35261 | set11nPktDurRTSCTS(i->rates, 3);
35262
35263- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35264+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35265 | set11nRateFlags(i->rates, 1)
35266 | set11nRateFlags(i->rates, 2)
35267 | set11nRateFlags(i->rates, 3)
35268diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35269index a66a13b..0ef399e 100644
35270--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35271+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35272@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35273 (i->qcu << AR_TxQcuNum_S) | desc_len;
35274
35275 checksum += val;
35276- ACCESS_ONCE(ads->info) = val;
35277+ ACCESS_ONCE_RW(ads->info) = val;
35278
35279 checksum += i->link;
35280- ACCESS_ONCE(ads->link) = i->link;
35281+ ACCESS_ONCE_RW(ads->link) = i->link;
35282
35283 checksum += i->buf_addr[0];
35284- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
35285+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
35286 checksum += i->buf_addr[1];
35287- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
35288+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
35289 checksum += i->buf_addr[2];
35290- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
35291+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
35292 checksum += i->buf_addr[3];
35293- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
35294+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
35295
35296 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
35297- ACCESS_ONCE(ads->ctl3) = val;
35298+ ACCESS_ONCE_RW(ads->ctl3) = val;
35299 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
35300- ACCESS_ONCE(ads->ctl5) = val;
35301+ ACCESS_ONCE_RW(ads->ctl5) = val;
35302 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
35303- ACCESS_ONCE(ads->ctl7) = val;
35304+ ACCESS_ONCE_RW(ads->ctl7) = val;
35305 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
35306- ACCESS_ONCE(ads->ctl9) = val;
35307+ ACCESS_ONCE_RW(ads->ctl9) = val;
35308
35309 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
35310- ACCESS_ONCE(ads->ctl10) = checksum;
35311+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
35312
35313 if (i->is_first || i->is_last) {
35314- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
35315+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
35316 | set11nTries(i->rates, 1)
35317 | set11nTries(i->rates, 2)
35318 | set11nTries(i->rates, 3)
35319 | (i->dur_update ? AR_DurUpdateEna : 0)
35320 | SM(0, AR_BurstDur);
35321
35322- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
35323+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
35324 | set11nRate(i->rates, 1)
35325 | set11nRate(i->rates, 2)
35326 | set11nRate(i->rates, 3);
35327 } else {
35328- ACCESS_ONCE(ads->ctl13) = 0;
35329- ACCESS_ONCE(ads->ctl14) = 0;
35330+ ACCESS_ONCE_RW(ads->ctl13) = 0;
35331+ ACCESS_ONCE_RW(ads->ctl14) = 0;
35332 }
35333
35334 ads->ctl20 = 0;
35335@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35336
35337 ctl17 = SM(i->keytype, AR_EncrType);
35338 if (!i->is_first) {
35339- ACCESS_ONCE(ads->ctl11) = 0;
35340- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35341- ACCESS_ONCE(ads->ctl15) = 0;
35342- ACCESS_ONCE(ads->ctl16) = 0;
35343- ACCESS_ONCE(ads->ctl17) = ctl17;
35344- ACCESS_ONCE(ads->ctl18) = 0;
35345- ACCESS_ONCE(ads->ctl19) = 0;
35346+ ACCESS_ONCE_RW(ads->ctl11) = 0;
35347+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35348+ ACCESS_ONCE_RW(ads->ctl15) = 0;
35349+ ACCESS_ONCE_RW(ads->ctl16) = 0;
35350+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35351+ ACCESS_ONCE_RW(ads->ctl18) = 0;
35352+ ACCESS_ONCE_RW(ads->ctl19) = 0;
35353 return;
35354 }
35355
35356- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35357+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35358 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35359 | SM(i->txpower, AR_XmitPower)
35360 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35361@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35362 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
35363 ctl12 |= SM(val, AR_PAPRDChainMask);
35364
35365- ACCESS_ONCE(ads->ctl12) = ctl12;
35366- ACCESS_ONCE(ads->ctl17) = ctl17;
35367+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
35368+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35369
35370- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35371+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35372 | set11nPktDurRTSCTS(i->rates, 1);
35373
35374- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35375+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35376 | set11nPktDurRTSCTS(i->rates, 3);
35377
35378- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
35379+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
35380 | set11nRateFlags(i->rates, 1)
35381 | set11nRateFlags(i->rates, 2)
35382 | set11nRateFlags(i->rates, 3)
35383 | SM(i->rtscts_rate, AR_RTSCTSRate);
35384
35385- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
35386+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
35387 }
35388
35389 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
35390diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
35391index e88f182..4e57f5d 100644
35392--- a/drivers/net/wireless/ath/ath9k/hw.h
35393+++ b/drivers/net/wireless/ath/ath9k/hw.h
35394@@ -614,7 +614,7 @@ struct ath_hw_private_ops {
35395
35396 /* ANI */
35397 void (*ani_cache_ini_regs)(struct ath_hw *ah);
35398-};
35399+} __no_const;
35400
35401 /**
35402 * struct ath_hw_ops - callbacks used by hardware code and driver code
35403@@ -644,7 +644,7 @@ struct ath_hw_ops {
35404 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
35405 struct ath_hw_antcomb_conf *antconf);
35406
35407-};
35408+} __no_const;
35409
35410 struct ath_nf_limits {
35411 s16 max;
35412@@ -664,7 +664,7 @@ enum ath_cal_list {
35413 #define AH_FASTCC 0x4
35414
35415 struct ath_hw {
35416- struct ath_ops reg_ops;
35417+ ath_ops_no_const reg_ops;
35418
35419 struct ieee80211_hw *hw;
35420 struct ath_common common;
35421diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35422index af00e2c..ab04d34 100644
35423--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35424+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35425@@ -545,7 +545,7 @@ struct phy_func_ptr {
35426 void (*carrsuppr)(struct brcms_phy *);
35427 s32 (*rxsigpwr)(struct brcms_phy *, s32);
35428 void (*detach)(struct brcms_phy *);
35429-};
35430+} __no_const;
35431
35432 struct brcms_phy {
35433 struct brcms_phy_pub pubpi_ro;
35434diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
35435index faec404..a5277f1 100644
35436--- a/drivers/net/wireless/iwlegacy/3945-mac.c
35437+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
35438@@ -3611,7 +3611,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
35439 */
35440 if (il3945_mod_params.disable_hw_scan) {
35441 D_INFO("Disabling hw_scan\n");
35442- il3945_mac_ops.hw_scan = NULL;
35443+ pax_open_kernel();
35444+ *(void **)&il3945_mac_ops.hw_scan = NULL;
35445+ pax_close_kernel();
35446 }
35447
35448 D_INFO("*** LOAD DRIVER ***\n");
35449diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
35450index b7ce6a6..5649756 100644
35451--- a/drivers/net/wireless/mac80211_hwsim.c
35452+++ b/drivers/net/wireless/mac80211_hwsim.c
35453@@ -1721,9 +1721,11 @@ static int __init init_mac80211_hwsim(void)
35454 return -EINVAL;
35455
35456 if (fake_hw_scan) {
35457- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35458- mac80211_hwsim_ops.sw_scan_start = NULL;
35459- mac80211_hwsim_ops.sw_scan_complete = NULL;
35460+ pax_open_kernel();
35461+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35462+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
35463+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
35464+ pax_close_kernel();
35465 }
35466
35467 spin_lock_init(&hwsim_radio_lock);
35468diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
35469index 35225e9..95e6bf9 100644
35470--- a/drivers/net/wireless/mwifiex/main.h
35471+++ b/drivers/net/wireless/mwifiex/main.h
35472@@ -537,7 +537,7 @@ struct mwifiex_if_ops {
35473 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
35474 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
35475 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
35476-};
35477+} __no_const;
35478
35479 struct mwifiex_adapter {
35480 u8 iface_type;
35481diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
35482index d66e298..55b0a89 100644
35483--- a/drivers/net/wireless/rndis_wlan.c
35484+++ b/drivers/net/wireless/rndis_wlan.c
35485@@ -1278,7 +1278,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
35486
35487 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
35488
35489- if (rts_threshold < 0 || rts_threshold > 2347)
35490+ if (rts_threshold > 2347)
35491 rts_threshold = 2347;
35492
35493 tmp = cpu_to_le32(rts_threshold);
35494diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
35495index 9d8f581..0f6589e 100644
35496--- a/drivers/net/wireless/wl1251/wl1251.h
35497+++ b/drivers/net/wireless/wl1251/wl1251.h
35498@@ -266,7 +266,7 @@ struct wl1251_if_operations {
35499 void (*reset)(struct wl1251 *wl);
35500 void (*enable_irq)(struct wl1251 *wl);
35501 void (*disable_irq)(struct wl1251 *wl);
35502-};
35503+} __no_const;
35504
35505 struct wl1251 {
35506 struct ieee80211_hw *hw;
35507diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
35508index f34b5b2..b5abb9f 100644
35509--- a/drivers/oprofile/buffer_sync.c
35510+++ b/drivers/oprofile/buffer_sync.c
35511@@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
35512 if (cookie == NO_COOKIE)
35513 offset = pc;
35514 if (cookie == INVALID_COOKIE) {
35515- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35516+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35517 offset = pc;
35518 }
35519 if (cookie != last_cookie) {
35520@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
35521 /* add userspace sample */
35522
35523 if (!mm) {
35524- atomic_inc(&oprofile_stats.sample_lost_no_mm);
35525+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35526 return 0;
35527 }
35528
35529 cookie = lookup_dcookie(mm, s->eip, &offset);
35530
35531 if (cookie == INVALID_COOKIE) {
35532- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35533+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35534 return 0;
35535 }
35536
35537@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
35538 /* ignore backtraces if failed to add a sample */
35539 if (state == sb_bt_start) {
35540 state = sb_bt_ignore;
35541- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35542+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35543 }
35544 }
35545 release_mm(mm);
35546diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
35547index c0cc4e7..44d4e54 100644
35548--- a/drivers/oprofile/event_buffer.c
35549+++ b/drivers/oprofile/event_buffer.c
35550@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
35551 }
35552
35553 if (buffer_pos == buffer_size) {
35554- atomic_inc(&oprofile_stats.event_lost_overflow);
35555+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35556 return;
35557 }
35558
35559diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
35560index ed2c3ec..deda85a 100644
35561--- a/drivers/oprofile/oprof.c
35562+++ b/drivers/oprofile/oprof.c
35563@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
35564 if (oprofile_ops.switch_events())
35565 return;
35566
35567- atomic_inc(&oprofile_stats.multiplex_counter);
35568+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35569 start_switch_worker();
35570 }
35571
35572diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
35573index 917d28e..d62d981 100644
35574--- a/drivers/oprofile/oprofile_stats.c
35575+++ b/drivers/oprofile/oprofile_stats.c
35576@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35577 cpu_buf->sample_invalid_eip = 0;
35578 }
35579
35580- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35581- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35582- atomic_set(&oprofile_stats.event_lost_overflow, 0);
35583- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35584- atomic_set(&oprofile_stats.multiplex_counter, 0);
35585+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35586+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35587+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35588+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35589+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35590 }
35591
35592
35593diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
35594index 38b6fc0..b5cbfce 100644
35595--- a/drivers/oprofile/oprofile_stats.h
35596+++ b/drivers/oprofile/oprofile_stats.h
35597@@ -13,11 +13,11 @@
35598 #include <linux/atomic.h>
35599
35600 struct oprofile_stat_struct {
35601- atomic_t sample_lost_no_mm;
35602- atomic_t sample_lost_no_mapping;
35603- atomic_t bt_lost_no_mapping;
35604- atomic_t event_lost_overflow;
35605- atomic_t multiplex_counter;
35606+ atomic_unchecked_t sample_lost_no_mm;
35607+ atomic_unchecked_t sample_lost_no_mapping;
35608+ atomic_unchecked_t bt_lost_no_mapping;
35609+ atomic_unchecked_t event_lost_overflow;
35610+ atomic_unchecked_t multiplex_counter;
35611 };
35612
35613 extern struct oprofile_stat_struct oprofile_stats;
35614diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
35615index 849357c..b83c1e0 100644
35616--- a/drivers/oprofile/oprofilefs.c
35617+++ b/drivers/oprofile/oprofilefs.c
35618@@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
35619
35620
35621 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35622- char const *name, atomic_t *val)
35623+ char const *name, atomic_unchecked_t *val)
35624 {
35625 return __oprofilefs_create_file(sb, root, name,
35626 &atomic_ro_fops, 0444, val);
35627diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
35628index 3f56bc0..707d642 100644
35629--- a/drivers/parport/procfs.c
35630+++ b/drivers/parport/procfs.c
35631@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
35632
35633 *ppos += len;
35634
35635- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35636+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35637 }
35638
35639 #ifdef CONFIG_PARPORT_1284
35640@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
35641
35642 *ppos += len;
35643
35644- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35645+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35646 }
35647 #endif /* IEEE1284.3 support. */
35648
35649diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
35650index 9fff878..ad0ad53 100644
35651--- a/drivers/pci/hotplug/cpci_hotplug.h
35652+++ b/drivers/pci/hotplug/cpci_hotplug.h
35653@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35654 int (*hardware_test) (struct slot* slot, u32 value);
35655 u8 (*get_power) (struct slot* slot);
35656 int (*set_power) (struct slot* slot, int value);
35657-};
35658+} __no_const;
35659
35660 struct cpci_hp_controller {
35661 unsigned int irq;
35662diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
35663index 76ba8a1..20ca857 100644
35664--- a/drivers/pci/hotplug/cpqphp_nvram.c
35665+++ b/drivers/pci/hotplug/cpqphp_nvram.c
35666@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
35667
35668 void compaq_nvram_init (void __iomem *rom_start)
35669 {
35670+
35671+#ifndef CONFIG_PAX_KERNEXEC
35672 if (rom_start) {
35673 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35674 }
35675+#endif
35676+
35677 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35678
35679 /* initialize our int15 lock */
35680diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
35681index b500840..d7159d3 100644
35682--- a/drivers/pci/pcie/aspm.c
35683+++ b/drivers/pci/pcie/aspm.c
35684@@ -27,9 +27,9 @@
35685 #define MODULE_PARAM_PREFIX "pcie_aspm."
35686
35687 /* Note: those are not register definitions */
35688-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35689-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35690-#define ASPM_STATE_L1 (4) /* L1 state */
35691+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35692+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35693+#define ASPM_STATE_L1 (4U) /* L1 state */
35694 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35695 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35696
35697diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
35698index 5e1ca3c..08082fe 100644
35699--- a/drivers/pci/probe.c
35700+++ b/drivers/pci/probe.c
35701@@ -215,7 +215,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
35702 u16 orig_cmd;
35703 struct pci_bus_region region;
35704
35705- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
35706+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
35707
35708 if (!dev->mmio_always_on) {
35709 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
35710diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
35711index 27911b5..5b6db88 100644
35712--- a/drivers/pci/proc.c
35713+++ b/drivers/pci/proc.c
35714@@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
35715 static int __init pci_proc_init(void)
35716 {
35717 struct pci_dev *dev = NULL;
35718+
35719+#ifdef CONFIG_GRKERNSEC_PROC_ADD
35720+#ifdef CONFIG_GRKERNSEC_PROC_USER
35721+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35722+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35723+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35724+#endif
35725+#else
35726 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35727+#endif
35728 proc_create("devices", 0, proc_bus_pci_dir,
35729 &proc_bus_pci_dev_operations);
35730 proc_initialized = 1;
35731diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
35732index d68c000..f6094ca 100644
35733--- a/drivers/platform/x86/thinkpad_acpi.c
35734+++ b/drivers/platform/x86/thinkpad_acpi.c
35735@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
35736 return 0;
35737 }
35738
35739-void static hotkey_mask_warn_incomplete_mask(void)
35740+static void hotkey_mask_warn_incomplete_mask(void)
35741 {
35742 /* log only what the user can fix... */
35743 const u32 wantedmask = hotkey_driver_mask &
35744@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
35745 }
35746 }
35747
35748-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35749- struct tp_nvram_state *newn,
35750- const u32 event_mask)
35751-{
35752-
35753 #define TPACPI_COMPARE_KEY(__scancode, __member) \
35754 do { \
35755 if ((event_mask & (1 << __scancode)) && \
35756@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35757 tpacpi_hotkey_send_key(__scancode); \
35758 } while (0)
35759
35760- void issue_volchange(const unsigned int oldvol,
35761- const unsigned int newvol)
35762- {
35763- unsigned int i = oldvol;
35764+static void issue_volchange(const unsigned int oldvol,
35765+ const unsigned int newvol,
35766+ const u32 event_mask)
35767+{
35768+ unsigned int i = oldvol;
35769
35770- while (i > newvol) {
35771- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35772- i--;
35773- }
35774- while (i < newvol) {
35775- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35776- i++;
35777- }
35778+ while (i > newvol) {
35779+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35780+ i--;
35781 }
35782+ while (i < newvol) {
35783+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35784+ i++;
35785+ }
35786+}
35787
35788- void issue_brightnesschange(const unsigned int oldbrt,
35789- const unsigned int newbrt)
35790- {
35791- unsigned int i = oldbrt;
35792+static void issue_brightnesschange(const unsigned int oldbrt,
35793+ const unsigned int newbrt,
35794+ const u32 event_mask)
35795+{
35796+ unsigned int i = oldbrt;
35797
35798- while (i > newbrt) {
35799- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35800- i--;
35801- }
35802- while (i < newbrt) {
35803- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35804- i++;
35805- }
35806+ while (i > newbrt) {
35807+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35808+ i--;
35809+ }
35810+ while (i < newbrt) {
35811+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35812+ i++;
35813 }
35814+}
35815
35816+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35817+ struct tp_nvram_state *newn,
35818+ const u32 event_mask)
35819+{
35820 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
35821 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
35822 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
35823@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35824 oldn->volume_level != newn->volume_level) {
35825 /* recently muted, or repeated mute keypress, or
35826 * multiple presses ending in mute */
35827- issue_volchange(oldn->volume_level, newn->volume_level);
35828+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35829 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
35830 }
35831 } else {
35832@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35833 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35834 }
35835 if (oldn->volume_level != newn->volume_level) {
35836- issue_volchange(oldn->volume_level, newn->volume_level);
35837+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35838 } else if (oldn->volume_toggle != newn->volume_toggle) {
35839 /* repeated vol up/down keypress at end of scale ? */
35840 if (newn->volume_level == 0)
35841@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35842 /* handle brightness */
35843 if (oldn->brightness_level != newn->brightness_level) {
35844 issue_brightnesschange(oldn->brightness_level,
35845- newn->brightness_level);
35846+ newn->brightness_level,
35847+ event_mask);
35848 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
35849 /* repeated key presses that didn't change state */
35850 if (newn->brightness_level == 0)
35851@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35852 && !tp_features.bright_unkfw)
35853 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35854 }
35855+}
35856
35857 #undef TPACPI_COMPARE_KEY
35858 #undef TPACPI_MAY_SEND_KEY
35859-}
35860
35861 /*
35862 * Polling driver
35863diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
35864index 769d265..a3a05ca 100644
35865--- a/drivers/pnp/pnpbios/bioscalls.c
35866+++ b/drivers/pnp/pnpbios/bioscalls.c
35867@@ -58,7 +58,7 @@ do { \
35868 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
35869 } while(0)
35870
35871-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
35872+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
35873 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
35874
35875 /*
35876@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35877
35878 cpu = get_cpu();
35879 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
35880+
35881+ pax_open_kernel();
35882 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
35883+ pax_close_kernel();
35884
35885 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
35886 spin_lock_irqsave(&pnp_bios_lock, flags);
35887@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35888 :"memory");
35889 spin_unlock_irqrestore(&pnp_bios_lock, flags);
35890
35891+ pax_open_kernel();
35892 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
35893+ pax_close_kernel();
35894+
35895 put_cpu();
35896
35897 /* If we get here and this is set then the PnP BIOS faulted on us. */
35898@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
35899 return status;
35900 }
35901
35902-void pnpbios_calls_init(union pnp_bios_install_struct *header)
35903+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
35904 {
35905 int i;
35906
35907@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35908 pnp_bios_callpoint.offset = header->fields.pm16offset;
35909 pnp_bios_callpoint.segment = PNP_CS16;
35910
35911+ pax_open_kernel();
35912+
35913 for_each_possible_cpu(i) {
35914 struct desc_struct *gdt = get_cpu_gdt_table(i);
35915 if (!gdt)
35916@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35917 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
35918 (unsigned long)__va(header->fields.pm16dseg));
35919 }
35920+
35921+ pax_close_kernel();
35922 }
35923diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
35924index b0ecacb..7c9da2e 100644
35925--- a/drivers/pnp/resource.c
35926+++ b/drivers/pnp/resource.c
35927@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
35928 return 1;
35929
35930 /* check if the resource is valid */
35931- if (*irq < 0 || *irq > 15)
35932+ if (*irq > 15)
35933 return 0;
35934
35935 /* check if the resource is reserved */
35936@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
35937 return 1;
35938
35939 /* check if the resource is valid */
35940- if (*dma < 0 || *dma == 4 || *dma > 7)
35941+ if (*dma == 4 || *dma > 7)
35942 return 0;
35943
35944 /* check if the resource is reserved */
35945diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
35946index 222ccd8..6275fa5 100644
35947--- a/drivers/power/bq27x00_battery.c
35948+++ b/drivers/power/bq27x00_battery.c
35949@@ -72,7 +72,7 @@
35950 struct bq27x00_device_info;
35951 struct bq27x00_access_methods {
35952 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
35953-};
35954+} __no_const;
35955
35956 enum bq27x00_chip { BQ27000, BQ27500 };
35957
35958diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
35959index 4c5b053..104263e 100644
35960--- a/drivers/regulator/max8660.c
35961+++ b/drivers/regulator/max8660.c
35962@@ -385,8 +385,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
35963 max8660->shadow_regs[MAX8660_OVER1] = 5;
35964 } else {
35965 /* Otherwise devices can be toggled via software */
35966- max8660_dcdc_ops.enable = max8660_dcdc_enable;
35967- max8660_dcdc_ops.disable = max8660_dcdc_disable;
35968+ pax_open_kernel();
35969+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
35970+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
35971+ pax_close_kernel();
35972 }
35973
35974 /*
35975diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
35976index 845aa22..99ec402 100644
35977--- a/drivers/regulator/mc13892-regulator.c
35978+++ b/drivers/regulator/mc13892-regulator.c
35979@@ -574,10 +574,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
35980 }
35981 mc13xxx_unlock(mc13892);
35982
35983- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35984+ pax_open_kernel();
35985+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35986 = mc13892_vcam_set_mode;
35987- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35988+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35989 = mc13892_vcam_get_mode;
35990+ pax_close_kernel();
35991
35992 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
35993 ARRAY_SIZE(mc13892_regulators));
35994diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
35995index cace6d3..f623fda 100644
35996--- a/drivers/rtc/rtc-dev.c
35997+++ b/drivers/rtc/rtc-dev.c
35998@@ -14,6 +14,7 @@
35999 #include <linux/module.h>
36000 #include <linux/rtc.h>
36001 #include <linux/sched.h>
36002+#include <linux/grsecurity.h>
36003 #include "rtc-core.h"
36004
36005 static dev_t rtc_devt;
36006@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
36007 if (copy_from_user(&tm, uarg, sizeof(tm)))
36008 return -EFAULT;
36009
36010+ gr_log_timechange();
36011+
36012 return rtc_set_time(rtc, &tm);
36013
36014 case RTC_PIE_ON:
36015diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
36016index 3fcf627..f334910 100644
36017--- a/drivers/scsi/aacraid/aacraid.h
36018+++ b/drivers/scsi/aacraid/aacraid.h
36019@@ -492,7 +492,7 @@ struct adapter_ops
36020 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36021 /* Administrative operations */
36022 int (*adapter_comm)(struct aac_dev * dev, int comm);
36023-};
36024+} __no_const;
36025
36026 /*
36027 * Define which interrupt handler needs to be installed
36028diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
36029index 0d279c44..3d25a97 100644
36030--- a/drivers/scsi/aacraid/linit.c
36031+++ b/drivers/scsi/aacraid/linit.c
36032@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
36033 #elif defined(__devinitconst)
36034 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36035 #else
36036-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
36037+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36038 #endif
36039 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
36040 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
36041diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
36042index ff80552..1c4120c 100644
36043--- a/drivers/scsi/aic94xx/aic94xx_init.c
36044+++ b/drivers/scsi/aic94xx/aic94xx_init.c
36045@@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
36046 .lldd_ata_set_dmamode = asd_set_dmamode,
36047 };
36048
36049-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
36050+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
36051 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
36052 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
36053 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
36054diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
36055index 4ad7e36..d004679 100644
36056--- a/drivers/scsi/bfa/bfa.h
36057+++ b/drivers/scsi/bfa/bfa.h
36058@@ -196,7 +196,7 @@ struct bfa_hwif_s {
36059 u32 *end);
36060 int cpe_vec_q0;
36061 int rme_vec_q0;
36062-};
36063+} __no_const;
36064 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36065
36066 struct bfa_faa_cbfn_s {
36067diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
36068index f0f80e2..8ec946b 100644
36069--- a/drivers/scsi/bfa/bfa_fcpim.c
36070+++ b/drivers/scsi/bfa/bfa_fcpim.c
36071@@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
36072
36073 bfa_iotag_attach(fcp);
36074
36075- fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
36076+ fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
36077 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
36078 (fcp->num_itns * sizeof(struct bfa_itn_s));
36079 memset(fcp->itn_arr, 0,
36080@@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36081 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
36082 {
36083 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
36084- struct bfa_itn_s *itn;
36085+ bfa_itn_s_no_const *itn;
36086
36087 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
36088 itn->isr = isr;
36089diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
36090index 36f26da..38a34a8 100644
36091--- a/drivers/scsi/bfa/bfa_fcpim.h
36092+++ b/drivers/scsi/bfa/bfa_fcpim.h
36093@@ -37,6 +37,7 @@ struct bfa_iotag_s {
36094 struct bfa_itn_s {
36095 bfa_isr_func_t isr;
36096 };
36097+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
36098
36099 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36100 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
36101@@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
36102 struct list_head iotag_tio_free_q; /* free IO resources */
36103 struct list_head iotag_unused_q; /* unused IO resources*/
36104 struct bfa_iotag_s *iotag_arr;
36105- struct bfa_itn_s *itn_arr;
36106+ bfa_itn_s_no_const *itn_arr;
36107 int num_ioim_reqs;
36108 int num_fwtio_reqs;
36109 int num_itns;
36110diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
36111index 1a99d4b..e85d64b 100644
36112--- a/drivers/scsi/bfa/bfa_ioc.h
36113+++ b/drivers/scsi/bfa/bfa_ioc.h
36114@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
36115 bfa_ioc_disable_cbfn_t disable_cbfn;
36116 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36117 bfa_ioc_reset_cbfn_t reset_cbfn;
36118-};
36119+} __no_const;
36120
36121 /*
36122 * IOC event notification mechanism.
36123@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
36124 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
36125 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
36126 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
36127-};
36128+} __no_const;
36129
36130 /*
36131 * Queue element to wait for room in request queue. FIFO order is
36132diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
36133index a3a056a..b9bbc2f 100644
36134--- a/drivers/scsi/hosts.c
36135+++ b/drivers/scsi/hosts.c
36136@@ -42,7 +42,7 @@
36137 #include "scsi_logging.h"
36138
36139
36140-static atomic_t scsi_host_next_hn; /* host_no for next new host */
36141+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36142
36143
36144 static void scsi_host_cls_release(struct device *dev)
36145@@ -360,7 +360,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
36146 * subtract one because we increment first then return, but we need to
36147 * know what the next host number was before increment
36148 */
36149- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36150+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36151 shost->dma_channel = 0xff;
36152
36153 /* These three are default values which can be overridden */
36154diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
36155index 500e20d..ebd3059 100644
36156--- a/drivers/scsi/hpsa.c
36157+++ b/drivers/scsi/hpsa.c
36158@@ -521,7 +521,7 @@ static inline u32 next_command(struct ctlr_info *h)
36159 u32 a;
36160
36161 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36162- return h->access.command_completed(h);
36163+ return h->access->command_completed(h);
36164
36165 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
36166 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
36167@@ -3002,7 +3002,7 @@ static void start_io(struct ctlr_info *h)
36168 while (!list_empty(&h->reqQ)) {
36169 c = list_entry(h->reqQ.next, struct CommandList, list);
36170 /* can't do anything if fifo is full */
36171- if ((h->access.fifo_full(h))) {
36172+ if ((h->access->fifo_full(h))) {
36173 dev_warn(&h->pdev->dev, "fifo full\n");
36174 break;
36175 }
36176@@ -3012,7 +3012,7 @@ static void start_io(struct ctlr_info *h)
36177 h->Qdepth--;
36178
36179 /* Tell the controller execute command */
36180- h->access.submit_command(h, c);
36181+ h->access->submit_command(h, c);
36182
36183 /* Put job onto the completed Q */
36184 addQ(&h->cmpQ, c);
36185@@ -3021,17 +3021,17 @@ static void start_io(struct ctlr_info *h)
36186
36187 static inline unsigned long get_next_completion(struct ctlr_info *h)
36188 {
36189- return h->access.command_completed(h);
36190+ return h->access->command_completed(h);
36191 }
36192
36193 static inline bool interrupt_pending(struct ctlr_info *h)
36194 {
36195- return h->access.intr_pending(h);
36196+ return h->access->intr_pending(h);
36197 }
36198
36199 static inline long interrupt_not_for_us(struct ctlr_info *h)
36200 {
36201- return (h->access.intr_pending(h) == 0) ||
36202+ return (h->access->intr_pending(h) == 0) ||
36203 (h->interrupts_enabled == 0);
36204 }
36205
36206@@ -3930,7 +3930,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
36207 if (prod_index < 0)
36208 return -ENODEV;
36209 h->product_name = products[prod_index].product_name;
36210- h->access = *(products[prod_index].access);
36211+ h->access = products[prod_index].access;
36212
36213 if (hpsa_board_disabled(h->pdev)) {
36214 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
36215@@ -4175,7 +4175,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
36216
36217 assert_spin_locked(&lockup_detector_lock);
36218 remove_ctlr_from_lockup_detector_list(h);
36219- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36220+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36221 spin_lock_irqsave(&h->lock, flags);
36222 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
36223 spin_unlock_irqrestore(&h->lock, flags);
36224@@ -4355,7 +4355,7 @@ reinit_after_soft_reset:
36225 }
36226
36227 /* make sure the board interrupts are off */
36228- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36229+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36230
36231 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
36232 goto clean2;
36233@@ -4389,7 +4389,7 @@ reinit_after_soft_reset:
36234 * fake ones to scoop up any residual completions.
36235 */
36236 spin_lock_irqsave(&h->lock, flags);
36237- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36238+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36239 spin_unlock_irqrestore(&h->lock, flags);
36240 free_irq(h->intr[h->intr_mode], h);
36241 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
36242@@ -4408,9 +4408,9 @@ reinit_after_soft_reset:
36243 dev_info(&h->pdev->dev, "Board READY.\n");
36244 dev_info(&h->pdev->dev,
36245 "Waiting for stale completions to drain.\n");
36246- h->access.set_intr_mask(h, HPSA_INTR_ON);
36247+ h->access->set_intr_mask(h, HPSA_INTR_ON);
36248 msleep(10000);
36249- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36250+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36251
36252 rc = controller_reset_failed(h->cfgtable);
36253 if (rc)
36254@@ -4431,7 +4431,7 @@ reinit_after_soft_reset:
36255 }
36256
36257 /* Turn the interrupts on so we can service requests */
36258- h->access.set_intr_mask(h, HPSA_INTR_ON);
36259+ h->access->set_intr_mask(h, HPSA_INTR_ON);
36260
36261 hpsa_hba_inquiry(h);
36262 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
36263@@ -4483,7 +4483,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
36264 * To write all data in the battery backed cache to disks
36265 */
36266 hpsa_flush_cache(h);
36267- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36268+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36269 free_irq(h->intr[h->intr_mode], h);
36270 #ifdef CONFIG_PCI_MSI
36271 if (h->msix_vector)
36272@@ -4657,7 +4657,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
36273 return;
36274 }
36275 /* Change the access methods to the performant access methods */
36276- h->access = SA5_performant_access;
36277+ h->access = &SA5_performant_access;
36278 h->transMethod = CFGTBL_Trans_Performant;
36279 }
36280
36281diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
36282index 7b28d54..952f23a 100644
36283--- a/drivers/scsi/hpsa.h
36284+++ b/drivers/scsi/hpsa.h
36285@@ -72,7 +72,7 @@ struct ctlr_info {
36286 unsigned int msix_vector;
36287 unsigned int msi_vector;
36288 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
36289- struct access_method access;
36290+ struct access_method *access;
36291
36292 /* queue and queue Info */
36293 struct list_head reqQ;
36294diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
36295index f2df059..a3a9930 100644
36296--- a/drivers/scsi/ips.h
36297+++ b/drivers/scsi/ips.h
36298@@ -1027,7 +1027,7 @@ typedef struct {
36299 int (*intr)(struct ips_ha *);
36300 void (*enableint)(struct ips_ha *);
36301 uint32_t (*statupd)(struct ips_ha *);
36302-} ips_hw_func_t;
36303+} __no_const ips_hw_func_t;
36304
36305 typedef struct ips_ha {
36306 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36307diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
36308index aceffad..c35c08d 100644
36309--- a/drivers/scsi/libfc/fc_exch.c
36310+++ b/drivers/scsi/libfc/fc_exch.c
36311@@ -105,12 +105,12 @@ struct fc_exch_mgr {
36312 * all together if not used XXX
36313 */
36314 struct {
36315- atomic_t no_free_exch;
36316- atomic_t no_free_exch_xid;
36317- atomic_t xid_not_found;
36318- atomic_t xid_busy;
36319- atomic_t seq_not_found;
36320- atomic_t non_bls_resp;
36321+ atomic_unchecked_t no_free_exch;
36322+ atomic_unchecked_t no_free_exch_xid;
36323+ atomic_unchecked_t xid_not_found;
36324+ atomic_unchecked_t xid_busy;
36325+ atomic_unchecked_t seq_not_found;
36326+ atomic_unchecked_t non_bls_resp;
36327 } stats;
36328 };
36329
36330@@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
36331 /* allocate memory for exchange */
36332 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36333 if (!ep) {
36334- atomic_inc(&mp->stats.no_free_exch);
36335+ atomic_inc_unchecked(&mp->stats.no_free_exch);
36336 goto out;
36337 }
36338 memset(ep, 0, sizeof(*ep));
36339@@ -780,7 +780,7 @@ out:
36340 return ep;
36341 err:
36342 spin_unlock_bh(&pool->lock);
36343- atomic_inc(&mp->stats.no_free_exch_xid);
36344+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36345 mempool_free(ep, mp->ep_pool);
36346 return NULL;
36347 }
36348@@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36349 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36350 ep = fc_exch_find(mp, xid);
36351 if (!ep) {
36352- atomic_inc(&mp->stats.xid_not_found);
36353+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36354 reject = FC_RJT_OX_ID;
36355 goto out;
36356 }
36357@@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36358 ep = fc_exch_find(mp, xid);
36359 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36360 if (ep) {
36361- atomic_inc(&mp->stats.xid_busy);
36362+ atomic_inc_unchecked(&mp->stats.xid_busy);
36363 reject = FC_RJT_RX_ID;
36364 goto rel;
36365 }
36366@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36367 }
36368 xid = ep->xid; /* get our XID */
36369 } else if (!ep) {
36370- atomic_inc(&mp->stats.xid_not_found);
36371+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36372 reject = FC_RJT_RX_ID; /* XID not found */
36373 goto out;
36374 }
36375@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36376 } else {
36377 sp = &ep->seq;
36378 if (sp->id != fh->fh_seq_id) {
36379- atomic_inc(&mp->stats.seq_not_found);
36380+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36381 if (f_ctl & FC_FC_END_SEQ) {
36382 /*
36383 * Update sequence_id based on incoming last
36384@@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36385
36386 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36387 if (!ep) {
36388- atomic_inc(&mp->stats.xid_not_found);
36389+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36390 goto out;
36391 }
36392 if (ep->esb_stat & ESB_ST_COMPLETE) {
36393- atomic_inc(&mp->stats.xid_not_found);
36394+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36395 goto rel;
36396 }
36397 if (ep->rxid == FC_XID_UNKNOWN)
36398 ep->rxid = ntohs(fh->fh_rx_id);
36399 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36400- atomic_inc(&mp->stats.xid_not_found);
36401+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36402 goto rel;
36403 }
36404 if (ep->did != ntoh24(fh->fh_s_id) &&
36405 ep->did != FC_FID_FLOGI) {
36406- atomic_inc(&mp->stats.xid_not_found);
36407+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36408 goto rel;
36409 }
36410 sof = fr_sof(fp);
36411@@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36412 sp->ssb_stat |= SSB_ST_RESP;
36413 sp->id = fh->fh_seq_id;
36414 } else if (sp->id != fh->fh_seq_id) {
36415- atomic_inc(&mp->stats.seq_not_found);
36416+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36417 goto rel;
36418 }
36419
36420@@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36421 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36422
36423 if (!sp)
36424- atomic_inc(&mp->stats.xid_not_found);
36425+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36426 else
36427- atomic_inc(&mp->stats.non_bls_resp);
36428+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
36429
36430 fc_frame_free(fp);
36431 }
36432diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
36433index 441d88a..689ad71 100644
36434--- a/drivers/scsi/libsas/sas_ata.c
36435+++ b/drivers/scsi/libsas/sas_ata.c
36436@@ -529,7 +529,7 @@ static struct ata_port_operations sas_sata_ops = {
36437 .postreset = ata_std_postreset,
36438 .error_handler = ata_std_error_handler,
36439 .post_internal_cmd = sas_ata_post_internal,
36440- .qc_defer = ata_std_qc_defer,
36441+ .qc_defer = ata_std_qc_defer,
36442 .qc_prep = ata_noop_qc_prep,
36443 .qc_issue = sas_ata_qc_issue,
36444 .qc_fill_rtf = sas_ata_qc_fill_rtf,
36445diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
36446index 3a1ffdd..8eb7c71 100644
36447--- a/drivers/scsi/lpfc/lpfc.h
36448+++ b/drivers/scsi/lpfc/lpfc.h
36449@@ -413,7 +413,7 @@ struct lpfc_vport {
36450 struct dentry *debug_nodelist;
36451 struct dentry *vport_debugfs_root;
36452 struct lpfc_debugfs_trc *disc_trc;
36453- atomic_t disc_trc_cnt;
36454+ atomic_unchecked_t disc_trc_cnt;
36455 #endif
36456 uint8_t stat_data_enabled;
36457 uint8_t stat_data_blocked;
36458@@ -826,8 +826,8 @@ struct lpfc_hba {
36459 struct timer_list fabric_block_timer;
36460 unsigned long bit_flags;
36461 #define FABRIC_COMANDS_BLOCKED 0
36462- atomic_t num_rsrc_err;
36463- atomic_t num_cmd_success;
36464+ atomic_unchecked_t num_rsrc_err;
36465+ atomic_unchecked_t num_cmd_success;
36466 unsigned long last_rsrc_error_time;
36467 unsigned long last_ramp_down_time;
36468 unsigned long last_ramp_up_time;
36469@@ -863,7 +863,7 @@ struct lpfc_hba {
36470
36471 struct dentry *debug_slow_ring_trc;
36472 struct lpfc_debugfs_trc *slow_ring_trc;
36473- atomic_t slow_ring_trc_cnt;
36474+ atomic_unchecked_t slow_ring_trc_cnt;
36475 /* iDiag debugfs sub-directory */
36476 struct dentry *idiag_root;
36477 struct dentry *idiag_pci_cfg;
36478diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
36479index af04b0d..8f1a97e 100644
36480--- a/drivers/scsi/lpfc/lpfc_debugfs.c
36481+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
36482@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
36483
36484 #include <linux/debugfs.h>
36485
36486-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36487+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36488 static unsigned long lpfc_debugfs_start_time = 0L;
36489
36490 /* iDiag */
36491@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
36492 lpfc_debugfs_enable = 0;
36493
36494 len = 0;
36495- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36496+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36497 (lpfc_debugfs_max_disc_trc - 1);
36498 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36499 dtp = vport->disc_trc + i;
36500@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
36501 lpfc_debugfs_enable = 0;
36502
36503 len = 0;
36504- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36505+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36506 (lpfc_debugfs_max_slow_ring_trc - 1);
36507 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36508 dtp = phba->slow_ring_trc + i;
36509@@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
36510 !vport || !vport->disc_trc)
36511 return;
36512
36513- index = atomic_inc_return(&vport->disc_trc_cnt) &
36514+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36515 (lpfc_debugfs_max_disc_trc - 1);
36516 dtp = vport->disc_trc + index;
36517 dtp->fmt = fmt;
36518 dtp->data1 = data1;
36519 dtp->data2 = data2;
36520 dtp->data3 = data3;
36521- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36522+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36523 dtp->jif = jiffies;
36524 #endif
36525 return;
36526@@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
36527 !phba || !phba->slow_ring_trc)
36528 return;
36529
36530- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36531+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36532 (lpfc_debugfs_max_slow_ring_trc - 1);
36533 dtp = phba->slow_ring_trc + index;
36534 dtp->fmt = fmt;
36535 dtp->data1 = data1;
36536 dtp->data2 = data2;
36537 dtp->data3 = data3;
36538- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36539+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36540 dtp->jif = jiffies;
36541 #endif
36542 return;
36543@@ -4090,7 +4090,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36544 "slow_ring buffer\n");
36545 goto debug_failed;
36546 }
36547- atomic_set(&phba->slow_ring_trc_cnt, 0);
36548+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36549 memset(phba->slow_ring_trc, 0,
36550 (sizeof(struct lpfc_debugfs_trc) *
36551 lpfc_debugfs_max_slow_ring_trc));
36552@@ -4136,7 +4136,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36553 "buffer\n");
36554 goto debug_failed;
36555 }
36556- atomic_set(&vport->disc_trc_cnt, 0);
36557+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36558
36559 snprintf(name, sizeof(name), "discovery_trace");
36560 vport->debug_disc_trc =
36561diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
36562index 9598fdc..7e9f3d9 100644
36563--- a/drivers/scsi/lpfc/lpfc_init.c
36564+++ b/drivers/scsi/lpfc/lpfc_init.c
36565@@ -10266,8 +10266,10 @@ lpfc_init(void)
36566 "misc_register returned with status %d", error);
36567
36568 if (lpfc_enable_npiv) {
36569- lpfc_transport_functions.vport_create = lpfc_vport_create;
36570- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36571+ pax_open_kernel();
36572+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36573+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36574+ pax_close_kernel();
36575 }
36576 lpfc_transport_template =
36577 fc_attach_transport(&lpfc_transport_functions);
36578diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
36579index 88f3a83..686d3fa 100644
36580--- a/drivers/scsi/lpfc/lpfc_scsi.c
36581+++ b/drivers/scsi/lpfc/lpfc_scsi.c
36582@@ -311,7 +311,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
36583 uint32_t evt_posted;
36584
36585 spin_lock_irqsave(&phba->hbalock, flags);
36586- atomic_inc(&phba->num_rsrc_err);
36587+ atomic_inc_unchecked(&phba->num_rsrc_err);
36588 phba->last_rsrc_error_time = jiffies;
36589
36590 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36591@@ -352,7 +352,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
36592 unsigned long flags;
36593 struct lpfc_hba *phba = vport->phba;
36594 uint32_t evt_posted;
36595- atomic_inc(&phba->num_cmd_success);
36596+ atomic_inc_unchecked(&phba->num_cmd_success);
36597
36598 if (vport->cfg_lun_queue_depth <= queue_depth)
36599 return;
36600@@ -396,8 +396,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36601 unsigned long num_rsrc_err, num_cmd_success;
36602 int i;
36603
36604- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36605- num_cmd_success = atomic_read(&phba->num_cmd_success);
36606+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36607+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36608
36609 vports = lpfc_create_vport_work_array(phba);
36610 if (vports != NULL)
36611@@ -417,8 +417,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36612 }
36613 }
36614 lpfc_destroy_vport_work_array(phba, vports);
36615- atomic_set(&phba->num_rsrc_err, 0);
36616- atomic_set(&phba->num_cmd_success, 0);
36617+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36618+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36619 }
36620
36621 /**
36622@@ -452,8 +452,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
36623 }
36624 }
36625 lpfc_destroy_vport_work_array(phba, vports);
36626- atomic_set(&phba->num_rsrc_err, 0);
36627- atomic_set(&phba->num_cmd_success, 0);
36628+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36629+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36630 }
36631
36632 /**
36633diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
36634index ea8a0b4..812a124 100644
36635--- a/drivers/scsi/pmcraid.c
36636+++ b/drivers/scsi/pmcraid.c
36637@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
36638 res->scsi_dev = scsi_dev;
36639 scsi_dev->hostdata = res;
36640 res->change_detected = 0;
36641- atomic_set(&res->read_failures, 0);
36642- atomic_set(&res->write_failures, 0);
36643+ atomic_set_unchecked(&res->read_failures, 0);
36644+ atomic_set_unchecked(&res->write_failures, 0);
36645 rc = 0;
36646 }
36647 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36648@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
36649
36650 /* If this was a SCSI read/write command keep count of errors */
36651 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36652- atomic_inc(&res->read_failures);
36653+ atomic_inc_unchecked(&res->read_failures);
36654 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36655- atomic_inc(&res->write_failures);
36656+ atomic_inc_unchecked(&res->write_failures);
36657
36658 if (!RES_IS_GSCSI(res->cfg_entry) &&
36659 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36660@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
36661 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36662 * hrrq_id assigned here in queuecommand
36663 */
36664- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36665+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36666 pinstance->num_hrrq;
36667 cmd->cmd_done = pmcraid_io_done;
36668
36669@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
36670 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36671 * hrrq_id assigned here in queuecommand
36672 */
36673- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36674+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36675 pinstance->num_hrrq;
36676
36677 if (request_size) {
36678@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
36679
36680 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36681 /* add resources only after host is added into system */
36682- if (!atomic_read(&pinstance->expose_resources))
36683+ if (!atomic_read_unchecked(&pinstance->expose_resources))
36684 return;
36685
36686 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
36687@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
36688 init_waitqueue_head(&pinstance->reset_wait_q);
36689
36690 atomic_set(&pinstance->outstanding_cmds, 0);
36691- atomic_set(&pinstance->last_message_id, 0);
36692- atomic_set(&pinstance->expose_resources, 0);
36693+ atomic_set_unchecked(&pinstance->last_message_id, 0);
36694+ atomic_set_unchecked(&pinstance->expose_resources, 0);
36695
36696 INIT_LIST_HEAD(&pinstance->free_res_q);
36697 INIT_LIST_HEAD(&pinstance->used_res_q);
36698@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
36699 /* Schedule worker thread to handle CCN and take care of adding and
36700 * removing devices to OS
36701 */
36702- atomic_set(&pinstance->expose_resources, 1);
36703+ atomic_set_unchecked(&pinstance->expose_resources, 1);
36704 schedule_work(&pinstance->worker_q);
36705 return rc;
36706
36707diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
36708index e1d150f..6c6df44 100644
36709--- a/drivers/scsi/pmcraid.h
36710+++ b/drivers/scsi/pmcraid.h
36711@@ -748,7 +748,7 @@ struct pmcraid_instance {
36712 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
36713
36714 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
36715- atomic_t last_message_id;
36716+ atomic_unchecked_t last_message_id;
36717
36718 /* configuration table */
36719 struct pmcraid_config_table *cfg_table;
36720@@ -777,7 +777,7 @@ struct pmcraid_instance {
36721 atomic_t outstanding_cmds;
36722
36723 /* should add/delete resources to mid-layer now ?*/
36724- atomic_t expose_resources;
36725+ atomic_unchecked_t expose_resources;
36726
36727
36728
36729@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
36730 struct pmcraid_config_table_entry_ext cfg_entry_ext;
36731 };
36732 struct scsi_device *scsi_dev; /* Link scsi_device structure */
36733- atomic_t read_failures; /* count of failed READ commands */
36734- atomic_t write_failures; /* count of failed WRITE commands */
36735+ atomic_unchecked_t read_failures; /* count of failed READ commands */
36736+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
36737
36738 /* To indicate add/delete/modify during CCN */
36739 u8 change_detected;
36740diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
36741index a244303..6015eb7 100644
36742--- a/drivers/scsi/qla2xxx/qla_def.h
36743+++ b/drivers/scsi/qla2xxx/qla_def.h
36744@@ -2264,7 +2264,7 @@ struct isp_operations {
36745 int (*start_scsi) (srb_t *);
36746 int (*abort_isp) (struct scsi_qla_host *);
36747 int (*iospace_config)(struct qla_hw_data*);
36748-};
36749+} __no_const;
36750
36751 /* MSI-X Support *************************************************************/
36752
36753diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
36754index 7f2492e..5113877 100644
36755--- a/drivers/scsi/qla4xxx/ql4_def.h
36756+++ b/drivers/scsi/qla4xxx/ql4_def.h
36757@@ -268,7 +268,7 @@ struct ddb_entry {
36758 * (4000 only) */
36759 atomic_t relogin_timer; /* Max Time to wait for
36760 * relogin to complete */
36761- atomic_t relogin_retry_count; /* Num of times relogin has been
36762+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
36763 * retried */
36764 uint32_t default_time2wait; /* Default Min time between
36765 * relogins (+aens) */
36766diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
36767index ee47820..a83b1f4 100644
36768--- a/drivers/scsi/qla4xxx/ql4_os.c
36769+++ b/drivers/scsi/qla4xxx/ql4_os.c
36770@@ -2551,12 +2551,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
36771 */
36772 if (!iscsi_is_session_online(cls_sess)) {
36773 /* Reset retry relogin timer */
36774- atomic_inc(&ddb_entry->relogin_retry_count);
36775+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
36776 DEBUG2(ql4_printk(KERN_INFO, ha,
36777 "%s: index[%d] relogin timed out-retrying"
36778 " relogin (%d), retry (%d)\n", __func__,
36779 ddb_entry->fw_ddb_index,
36780- atomic_read(&ddb_entry->relogin_retry_count),
36781+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
36782 ddb_entry->default_time2wait + 4));
36783 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
36784 atomic_set(&ddb_entry->retry_relogin_timer,
36785@@ -4453,7 +4453,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
36786
36787 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
36788 atomic_set(&ddb_entry->relogin_timer, 0);
36789- atomic_set(&ddb_entry->relogin_retry_count, 0);
36790+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36791 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
36792 ddb_entry->default_relogin_timeout =
36793 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
36794diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
36795index 07322ec..91ccc23 100644
36796--- a/drivers/scsi/scsi.c
36797+++ b/drivers/scsi/scsi.c
36798@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
36799 unsigned long timeout;
36800 int rtn = 0;
36801
36802- atomic_inc(&cmd->device->iorequest_cnt);
36803+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36804
36805 /* check if the device is still usable */
36806 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
36807diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
36808index 4037fd5..a19fcc7 100644
36809--- a/drivers/scsi/scsi_lib.c
36810+++ b/drivers/scsi/scsi_lib.c
36811@@ -1415,7 +1415,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
36812 shost = sdev->host;
36813 scsi_init_cmd_errh(cmd);
36814 cmd->result = DID_NO_CONNECT << 16;
36815- atomic_inc(&cmd->device->iorequest_cnt);
36816+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36817
36818 /*
36819 * SCSI request completion path will do scsi_device_unbusy(),
36820@@ -1441,9 +1441,9 @@ static void scsi_softirq_done(struct request *rq)
36821
36822 INIT_LIST_HEAD(&cmd->eh_entry);
36823
36824- atomic_inc(&cmd->device->iodone_cnt);
36825+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
36826 if (cmd->result)
36827- atomic_inc(&cmd->device->ioerr_cnt);
36828+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
36829
36830 disposition = scsi_decide_disposition(cmd);
36831 if (disposition != SUCCESS &&
36832diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
36833index 04c2a27..9d8bd66 100644
36834--- a/drivers/scsi/scsi_sysfs.c
36835+++ b/drivers/scsi/scsi_sysfs.c
36836@@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
36837 char *buf) \
36838 { \
36839 struct scsi_device *sdev = to_scsi_device(dev); \
36840- unsigned long long count = atomic_read(&sdev->field); \
36841+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
36842 return snprintf(buf, 20, "0x%llx\n", count); \
36843 } \
36844 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
36845diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
36846index 84a1fdf..693b0d6 100644
36847--- a/drivers/scsi/scsi_tgt_lib.c
36848+++ b/drivers/scsi/scsi_tgt_lib.c
36849@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
36850 int err;
36851
36852 dprintk("%lx %u\n", uaddr, len);
36853- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
36854+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
36855 if (err) {
36856 /*
36857 * TODO: need to fixup sg_tablesize, max_segment_size,
36858diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
36859index 80fbe2a..efa223b 100644
36860--- a/drivers/scsi/scsi_transport_fc.c
36861+++ b/drivers/scsi/scsi_transport_fc.c
36862@@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
36863 * Netlink Infrastructure
36864 */
36865
36866-static atomic_t fc_event_seq;
36867+static atomic_unchecked_t fc_event_seq;
36868
36869 /**
36870 * fc_get_event_number - Obtain the next sequential FC event number
36871@@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
36872 u32
36873 fc_get_event_number(void)
36874 {
36875- return atomic_add_return(1, &fc_event_seq);
36876+ return atomic_add_return_unchecked(1, &fc_event_seq);
36877 }
36878 EXPORT_SYMBOL(fc_get_event_number);
36879
36880@@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
36881 {
36882 int error;
36883
36884- atomic_set(&fc_event_seq, 0);
36885+ atomic_set_unchecked(&fc_event_seq, 0);
36886
36887 error = transport_class_register(&fc_host_class);
36888 if (error)
36889@@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
36890 char *cp;
36891
36892 *val = simple_strtoul(buf, &cp, 0);
36893- if ((*cp && (*cp != '\n')) || (*val < 0))
36894+ if (*cp && (*cp != '\n'))
36895 return -EINVAL;
36896 /*
36897 * Check for overflow; dev_loss_tmo is u32
36898diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
36899index 1cf640e..78e9014 100644
36900--- a/drivers/scsi/scsi_transport_iscsi.c
36901+++ b/drivers/scsi/scsi_transport_iscsi.c
36902@@ -79,7 +79,7 @@ struct iscsi_internal {
36903 struct transport_container session_cont;
36904 };
36905
36906-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
36907+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
36908 static struct workqueue_struct *iscsi_eh_timer_workq;
36909
36910 static DEFINE_IDA(iscsi_sess_ida);
36911@@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
36912 int err;
36913
36914 ihost = shost->shost_data;
36915- session->sid = atomic_add_return(1, &iscsi_session_nr);
36916+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
36917
36918 if (target_id == ISCSI_MAX_TARGET) {
36919 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
36920@@ -2940,7 +2940,7 @@ static __init int iscsi_transport_init(void)
36921 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
36922 ISCSI_TRANSPORT_VERSION);
36923
36924- atomic_set(&iscsi_session_nr, 0);
36925+ atomic_set_unchecked(&iscsi_session_nr, 0);
36926
36927 err = class_register(&iscsi_transport_class);
36928 if (err)
36929diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
36930index 21a045e..ec89e03 100644
36931--- a/drivers/scsi/scsi_transport_srp.c
36932+++ b/drivers/scsi/scsi_transport_srp.c
36933@@ -33,7 +33,7 @@
36934 #include "scsi_transport_srp_internal.h"
36935
36936 struct srp_host_attrs {
36937- atomic_t next_port_id;
36938+ atomic_unchecked_t next_port_id;
36939 };
36940 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
36941
36942@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
36943 struct Scsi_Host *shost = dev_to_shost(dev);
36944 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
36945
36946- atomic_set(&srp_host->next_port_id, 0);
36947+ atomic_set_unchecked(&srp_host->next_port_id, 0);
36948 return 0;
36949 }
36950
36951@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
36952 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
36953 rport->roles = ids->roles;
36954
36955- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
36956+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
36957 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
36958
36959 transport_setup_device(&rport->dev);
36960diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
36961index eacd46b..e3f4d62 100644
36962--- a/drivers/scsi/sg.c
36963+++ b/drivers/scsi/sg.c
36964@@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
36965 sdp->disk->disk_name,
36966 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
36967 NULL,
36968- (char *)arg);
36969+ (char __user *)arg);
36970 case BLKTRACESTART:
36971 return blk_trace_startstop(sdp->device->request_queue, 1);
36972 case BLKTRACESTOP:
36973@@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
36974 const struct file_operations * fops;
36975 };
36976
36977-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
36978+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
36979 {"allow_dio", &adio_fops},
36980 {"debug", &debug_fops},
36981 {"def_reserved_size", &dressz_fops},
36982@@ -2332,7 +2332,7 @@ sg_proc_init(void)
36983 if (!sg_proc_sgp)
36984 return 1;
36985 for (k = 0; k < num_leaves; ++k) {
36986- struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
36987+ const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
36988 umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
36989 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
36990 }
36991diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
36992index 3d8f662..070f1a5 100644
36993--- a/drivers/spi/spi.c
36994+++ b/drivers/spi/spi.c
36995@@ -1361,7 +1361,7 @@ int spi_bus_unlock(struct spi_master *master)
36996 EXPORT_SYMBOL_GPL(spi_bus_unlock);
36997
36998 /* portable code must never pass more than 32 bytes */
36999-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37000+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
37001
37002 static u8 *buf;
37003
37004diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
37005index d91751f..a3a9e36 100644
37006--- a/drivers/staging/octeon/ethernet-rx.c
37007+++ b/drivers/staging/octeon/ethernet-rx.c
37008@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37009 /* Increment RX stats for virtual ports */
37010 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37011 #ifdef CONFIG_64BIT
37012- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37013- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37014+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37015+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37016 #else
37017- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37018- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37019+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37020+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37021 #endif
37022 }
37023 netif_receive_skb(skb);
37024@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37025 dev->name);
37026 */
37027 #ifdef CONFIG_64BIT
37028- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37029+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37030 #else
37031- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37032+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
37033 #endif
37034 dev_kfree_skb_irq(skb);
37035 }
37036diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
37037index 60cba81..71eb239 100644
37038--- a/drivers/staging/octeon/ethernet.c
37039+++ b/drivers/staging/octeon/ethernet.c
37040@@ -259,11 +259,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
37041 * since the RX tasklet also increments it.
37042 */
37043 #ifdef CONFIG_64BIT
37044- atomic64_add(rx_status.dropped_packets,
37045- (atomic64_t *)&priv->stats.rx_dropped);
37046+ atomic64_add_unchecked(rx_status.dropped_packets,
37047+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37048 #else
37049- atomic_add(rx_status.dropped_packets,
37050- (atomic_t *)&priv->stats.rx_dropped);
37051+ atomic_add_unchecked(rx_status.dropped_packets,
37052+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
37053 #endif
37054 }
37055
37056diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
37057index d3d8727..f9327bb8 100644
37058--- a/drivers/staging/rtl8712/rtl871x_io.h
37059+++ b/drivers/staging/rtl8712/rtl871x_io.h
37060@@ -108,7 +108,7 @@ struct _io_ops {
37061 u8 *pmem);
37062 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
37063 u8 *pmem);
37064-};
37065+} __no_const;
37066
37067 struct io_req {
37068 struct list_head list;
37069diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
37070index c7b5e8b..783d6cb 100644
37071--- a/drivers/staging/sbe-2t3e3/netdev.c
37072+++ b/drivers/staging/sbe-2t3e3/netdev.c
37073@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37074 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
37075
37076 if (rlen)
37077- if (copy_to_user(data, &resp, rlen))
37078+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
37079 return -EFAULT;
37080
37081 return 0;
37082diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
37083index 42cdafe..2769103 100644
37084--- a/drivers/staging/speakup/speakup_soft.c
37085+++ b/drivers/staging/speakup/speakup_soft.c
37086@@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
37087 break;
37088 } else if (!initialized) {
37089 if (*init) {
37090- ch = *init;
37091 init++;
37092 } else {
37093 initialized = 1;
37094 }
37095+ ch = *init;
37096 } else {
37097 ch = synth_buffer_getc();
37098 }
37099diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
37100index c7b888c..c94be93 100644
37101--- a/drivers/staging/usbip/usbip_common.h
37102+++ b/drivers/staging/usbip/usbip_common.h
37103@@ -289,7 +289,7 @@ struct usbip_device {
37104 void (*shutdown)(struct usbip_device *);
37105 void (*reset)(struct usbip_device *);
37106 void (*unusable)(struct usbip_device *);
37107- } eh_ops;
37108+ } __no_const eh_ops;
37109 };
37110
37111 /* usbip_common.c */
37112diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
37113index 88b3298..3783eee 100644
37114--- a/drivers/staging/usbip/vhci.h
37115+++ b/drivers/staging/usbip/vhci.h
37116@@ -88,7 +88,7 @@ struct vhci_hcd {
37117 unsigned resuming:1;
37118 unsigned long re_timeout;
37119
37120- atomic_t seqnum;
37121+ atomic_unchecked_t seqnum;
37122
37123 /*
37124 * NOTE:
37125diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
37126index dca9bf1..80735c9 100644
37127--- a/drivers/staging/usbip/vhci_hcd.c
37128+++ b/drivers/staging/usbip/vhci_hcd.c
37129@@ -488,7 +488,7 @@ static void vhci_tx_urb(struct urb *urb)
37130 return;
37131 }
37132
37133- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37134+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37135 if (priv->seqnum == 0xffff)
37136 dev_info(&urb->dev->dev, "seqnum max\n");
37137
37138@@ -740,7 +740,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
37139 return -ENOMEM;
37140 }
37141
37142- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37143+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37144 if (unlink->seqnum == 0xffff)
37145 pr_info("seqnum max\n");
37146
37147@@ -928,7 +928,7 @@ static int vhci_start(struct usb_hcd *hcd)
37148 vdev->rhport = rhport;
37149 }
37150
37151- atomic_set(&vhci->seqnum, 0);
37152+ atomic_set_unchecked(&vhci->seqnum, 0);
37153 spin_lock_init(&vhci->lock);
37154
37155 hcd->power_budget = 0; /* no limit */
37156diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
37157index f5fba732..210a16c 100644
37158--- a/drivers/staging/usbip/vhci_rx.c
37159+++ b/drivers/staging/usbip/vhci_rx.c
37160@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
37161 if (!urb) {
37162 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
37163 pr_info("max seqnum %d\n",
37164- atomic_read(&the_controller->seqnum));
37165+ atomic_read_unchecked(&the_controller->seqnum));
37166 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37167 return;
37168 }
37169diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
37170index 7735027..30eed13 100644
37171--- a/drivers/staging/vt6655/hostap.c
37172+++ b/drivers/staging/vt6655/hostap.c
37173@@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
37174 *
37175 */
37176
37177+static net_device_ops_no_const apdev_netdev_ops;
37178+
37179 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37180 {
37181 PSDevice apdev_priv;
37182 struct net_device *dev = pDevice->dev;
37183 int ret;
37184- const struct net_device_ops apdev_netdev_ops = {
37185- .ndo_start_xmit = pDevice->tx_80211,
37186- };
37187
37188 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37189
37190@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37191 *apdev_priv = *pDevice;
37192 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37193
37194+ /* only half broken now */
37195+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37196 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37197
37198 pDevice->apdev->type = ARPHRD_IEEE80211;
37199diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
37200index 51b5adf..098e320 100644
37201--- a/drivers/staging/vt6656/hostap.c
37202+++ b/drivers/staging/vt6656/hostap.c
37203@@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
37204 *
37205 */
37206
37207+static net_device_ops_no_const apdev_netdev_ops;
37208+
37209 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37210 {
37211 PSDevice apdev_priv;
37212 struct net_device *dev = pDevice->dev;
37213 int ret;
37214- const struct net_device_ops apdev_netdev_ops = {
37215- .ndo_start_xmit = pDevice->tx_80211,
37216- };
37217
37218 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37219
37220@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37221 *apdev_priv = *pDevice;
37222 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37223
37224+ /* only half broken now */
37225+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37226 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37227
37228 pDevice->apdev->type = ARPHRD_IEEE80211;
37229diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
37230index 7843dfd..3db105f 100644
37231--- a/drivers/staging/wlan-ng/hfa384x_usb.c
37232+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
37233@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
37234
37235 struct usbctlx_completor {
37236 int (*complete) (struct usbctlx_completor *);
37237-};
37238+} __no_const;
37239
37240 static int
37241 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
37242diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
37243index 1ca66ea..76f1343 100644
37244--- a/drivers/staging/zcache/tmem.c
37245+++ b/drivers/staging/zcache/tmem.c
37246@@ -39,7 +39,7 @@
37247 * A tmem host implementation must use this function to register callbacks
37248 * for memory allocation.
37249 */
37250-static struct tmem_hostops tmem_hostops;
37251+static tmem_hostops_no_const tmem_hostops;
37252
37253 static void tmem_objnode_tree_init(void);
37254
37255@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
37256 * A tmem host implementation must use this function to register
37257 * callbacks for a page-accessible memory (PAM) implementation
37258 */
37259-static struct tmem_pamops tmem_pamops;
37260+static tmem_pamops_no_const tmem_pamops;
37261
37262 void tmem_register_pamops(struct tmem_pamops *m)
37263 {
37264diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
37265index 0d4aa82..f7832d4 100644
37266--- a/drivers/staging/zcache/tmem.h
37267+++ b/drivers/staging/zcache/tmem.h
37268@@ -180,6 +180,7 @@ struct tmem_pamops {
37269 void (*new_obj)(struct tmem_obj *);
37270 int (*replace_in_obj)(void *, struct tmem_obj *);
37271 };
37272+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
37273 extern void tmem_register_pamops(struct tmem_pamops *m);
37274
37275 /* memory allocation methods provided by the host implementation */
37276@@ -189,6 +190,7 @@ struct tmem_hostops {
37277 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
37278 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
37279 };
37280+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
37281 extern void tmem_register_hostops(struct tmem_hostops *m);
37282
37283 /* core tmem accessor functions */
37284diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
37285index f015839..b15dfc4 100644
37286--- a/drivers/target/target_core_tmr.c
37287+++ b/drivers/target/target_core_tmr.c
37288@@ -327,7 +327,7 @@ static void core_tmr_drain_task_list(
37289 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
37290 cmd->t_task_list_num,
37291 atomic_read(&cmd->t_task_cdbs_left),
37292- atomic_read(&cmd->t_task_cdbs_sent),
37293+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37294 (cmd->transport_state & CMD_T_ACTIVE) != 0,
37295 (cmd->transport_state & CMD_T_STOP) != 0,
37296 (cmd->transport_state & CMD_T_SENT) != 0);
37297diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
37298index 443704f..92d3517 100644
37299--- a/drivers/target/target_core_transport.c
37300+++ b/drivers/target/target_core_transport.c
37301@@ -1355,7 +1355,7 @@ struct se_device *transport_add_device_to_core_hba(
37302 spin_lock_init(&dev->se_port_lock);
37303 spin_lock_init(&dev->se_tmr_lock);
37304 spin_lock_init(&dev->qf_cmd_lock);
37305- atomic_set(&dev->dev_ordered_id, 0);
37306+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
37307
37308 se_dev_set_default_attribs(dev, dev_limits);
37309
37310@@ -1542,7 +1542,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
37311 * Used to determine when ORDERED commands should go from
37312 * Dormant to Active status.
37313 */
37314- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
37315+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
37316 smp_mb__after_atomic_inc();
37317 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
37318 cmd->se_ordered_id, cmd->sam_task_attr,
37319@@ -1956,7 +1956,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
37320 " CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
37321 cmd->t_task_list_num,
37322 atomic_read(&cmd->t_task_cdbs_left),
37323- atomic_read(&cmd->t_task_cdbs_sent),
37324+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37325 atomic_read(&cmd->t_task_cdbs_ex_left),
37326 (cmd->transport_state & CMD_T_ACTIVE) != 0,
37327 (cmd->transport_state & CMD_T_STOP) != 0,
37328@@ -2216,9 +2216,9 @@ check_depth:
37329 cmd = task->task_se_cmd;
37330 spin_lock_irqsave(&cmd->t_state_lock, flags);
37331 task->task_flags |= (TF_ACTIVE | TF_SENT);
37332- atomic_inc(&cmd->t_task_cdbs_sent);
37333+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
37334
37335- if (atomic_read(&cmd->t_task_cdbs_sent) ==
37336+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
37337 cmd->t_task_list_num)
37338 cmd->transport_state |= CMD_T_SENT;
37339
37340diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
37341index 3436436..772237b 100644
37342--- a/drivers/tty/hvc/hvcs.c
37343+++ b/drivers/tty/hvc/hvcs.c
37344@@ -83,6 +83,7 @@
37345 #include <asm/hvcserver.h>
37346 #include <asm/uaccess.h>
37347 #include <asm/vio.h>
37348+#include <asm/local.h>
37349
37350 /*
37351 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
37352@@ -270,7 +271,7 @@ struct hvcs_struct {
37353 unsigned int index;
37354
37355 struct tty_struct *tty;
37356- int open_count;
37357+ local_t open_count;
37358
37359 /*
37360 * Used to tell the driver kernel_thread what operations need to take
37361@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
37362
37363 spin_lock_irqsave(&hvcsd->lock, flags);
37364
37365- if (hvcsd->open_count > 0) {
37366+ if (local_read(&hvcsd->open_count) > 0) {
37367 spin_unlock_irqrestore(&hvcsd->lock, flags);
37368 printk(KERN_INFO "HVCS: vterm state unchanged. "
37369 "The hvcs device node is still in use.\n");
37370@@ -1138,7 +1139,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
37371 if ((retval = hvcs_partner_connect(hvcsd)))
37372 goto error_release;
37373
37374- hvcsd->open_count = 1;
37375+ local_set(&hvcsd->open_count, 1);
37376 hvcsd->tty = tty;
37377 tty->driver_data = hvcsd;
37378
37379@@ -1172,7 +1173,7 @@ fast_open:
37380
37381 spin_lock_irqsave(&hvcsd->lock, flags);
37382 kref_get(&hvcsd->kref);
37383- hvcsd->open_count++;
37384+ local_inc(&hvcsd->open_count);
37385 hvcsd->todo_mask |= HVCS_SCHED_READ;
37386 spin_unlock_irqrestore(&hvcsd->lock, flags);
37387
37388@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37389 hvcsd = tty->driver_data;
37390
37391 spin_lock_irqsave(&hvcsd->lock, flags);
37392- if (--hvcsd->open_count == 0) {
37393+ if (local_dec_and_test(&hvcsd->open_count)) {
37394
37395 vio_disable_interrupts(hvcsd->vdev);
37396
37397@@ -1242,10 +1243,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37398 free_irq(irq, hvcsd);
37399 kref_put(&hvcsd->kref, destroy_hvcs_struct);
37400 return;
37401- } else if (hvcsd->open_count < 0) {
37402+ } else if (local_read(&hvcsd->open_count) < 0) {
37403 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
37404 " is missmanaged.\n",
37405- hvcsd->vdev->unit_address, hvcsd->open_count);
37406+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
37407 }
37408
37409 spin_unlock_irqrestore(&hvcsd->lock, flags);
37410@@ -1261,7 +1262,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37411
37412 spin_lock_irqsave(&hvcsd->lock, flags);
37413 /* Preserve this so that we know how many kref refs to put */
37414- temp_open_count = hvcsd->open_count;
37415+ temp_open_count = local_read(&hvcsd->open_count);
37416
37417 /*
37418 * Don't kref put inside the spinlock because the destruction
37419@@ -1276,7 +1277,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37420 hvcsd->tty->driver_data = NULL;
37421 hvcsd->tty = NULL;
37422
37423- hvcsd->open_count = 0;
37424+ local_set(&hvcsd->open_count, 0);
37425
37426 /* This will drop any buffered data on the floor which is OK in a hangup
37427 * scenario. */
37428@@ -1347,7 +1348,7 @@ static int hvcs_write(struct tty_struct *tty,
37429 * the middle of a write operation? This is a crummy place to do this
37430 * but we want to keep it all in the spinlock.
37431 */
37432- if (hvcsd->open_count <= 0) {
37433+ if (local_read(&hvcsd->open_count) <= 0) {
37434 spin_unlock_irqrestore(&hvcsd->lock, flags);
37435 return -ENODEV;
37436 }
37437@@ -1421,7 +1422,7 @@ static int hvcs_write_room(struct tty_struct *tty)
37438 {
37439 struct hvcs_struct *hvcsd = tty->driver_data;
37440
37441- if (!hvcsd || hvcsd->open_count <= 0)
37442+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
37443 return 0;
37444
37445 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
37446diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
37447index 4daf962..b4a2281 100644
37448--- a/drivers/tty/ipwireless/tty.c
37449+++ b/drivers/tty/ipwireless/tty.c
37450@@ -29,6 +29,7 @@
37451 #include <linux/tty_driver.h>
37452 #include <linux/tty_flip.h>
37453 #include <linux/uaccess.h>
37454+#include <asm/local.h>
37455
37456 #include "tty.h"
37457 #include "network.h"
37458@@ -51,7 +52,7 @@ struct ipw_tty {
37459 int tty_type;
37460 struct ipw_network *network;
37461 struct tty_struct *linux_tty;
37462- int open_count;
37463+ local_t open_count;
37464 unsigned int control_lines;
37465 struct mutex ipw_tty_mutex;
37466 int tx_bytes_queued;
37467@@ -117,10 +118,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37468 mutex_unlock(&tty->ipw_tty_mutex);
37469 return -ENODEV;
37470 }
37471- if (tty->open_count == 0)
37472+ if (local_read(&tty->open_count) == 0)
37473 tty->tx_bytes_queued = 0;
37474
37475- tty->open_count++;
37476+ local_inc(&tty->open_count);
37477
37478 tty->linux_tty = linux_tty;
37479 linux_tty->driver_data = tty;
37480@@ -136,9 +137,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37481
37482 static void do_ipw_close(struct ipw_tty *tty)
37483 {
37484- tty->open_count--;
37485-
37486- if (tty->open_count == 0) {
37487+ if (local_dec_return(&tty->open_count) == 0) {
37488 struct tty_struct *linux_tty = tty->linux_tty;
37489
37490 if (linux_tty != NULL) {
37491@@ -159,7 +158,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
37492 return;
37493
37494 mutex_lock(&tty->ipw_tty_mutex);
37495- if (tty->open_count == 0) {
37496+ if (local_read(&tty->open_count) == 0) {
37497 mutex_unlock(&tty->ipw_tty_mutex);
37498 return;
37499 }
37500@@ -188,7 +187,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
37501 return;
37502 }
37503
37504- if (!tty->open_count) {
37505+ if (!local_read(&tty->open_count)) {
37506 mutex_unlock(&tty->ipw_tty_mutex);
37507 return;
37508 }
37509@@ -230,7 +229,7 @@ static int ipw_write(struct tty_struct *linux_tty,
37510 return -ENODEV;
37511
37512 mutex_lock(&tty->ipw_tty_mutex);
37513- if (!tty->open_count) {
37514+ if (!local_read(&tty->open_count)) {
37515 mutex_unlock(&tty->ipw_tty_mutex);
37516 return -EINVAL;
37517 }
37518@@ -270,7 +269,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
37519 if (!tty)
37520 return -ENODEV;
37521
37522- if (!tty->open_count)
37523+ if (!local_read(&tty->open_count))
37524 return -EINVAL;
37525
37526 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
37527@@ -312,7 +311,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
37528 if (!tty)
37529 return 0;
37530
37531- if (!tty->open_count)
37532+ if (!local_read(&tty->open_count))
37533 return 0;
37534
37535 return tty->tx_bytes_queued;
37536@@ -393,7 +392,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
37537 if (!tty)
37538 return -ENODEV;
37539
37540- if (!tty->open_count)
37541+ if (!local_read(&tty->open_count))
37542 return -EINVAL;
37543
37544 return get_control_lines(tty);
37545@@ -409,7 +408,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
37546 if (!tty)
37547 return -ENODEV;
37548
37549- if (!tty->open_count)
37550+ if (!local_read(&tty->open_count))
37551 return -EINVAL;
37552
37553 return set_control_lines(tty, set, clear);
37554@@ -423,7 +422,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
37555 if (!tty)
37556 return -ENODEV;
37557
37558- if (!tty->open_count)
37559+ if (!local_read(&tty->open_count))
37560 return -EINVAL;
37561
37562 /* FIXME: Exactly how is the tty object locked here .. */
37563@@ -572,7 +571,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
37564 against a parallel ioctl etc */
37565 mutex_lock(&ttyj->ipw_tty_mutex);
37566 }
37567- while (ttyj->open_count)
37568+ while (local_read(&ttyj->open_count))
37569 do_ipw_close(ttyj);
37570 ipwireless_disassociate_network_ttys(network,
37571 ttyj->channel_idx);
37572diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
37573index c43b683..0a88f1c 100644
37574--- a/drivers/tty/n_gsm.c
37575+++ b/drivers/tty/n_gsm.c
37576@@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
37577 kref_init(&dlci->ref);
37578 mutex_init(&dlci->mutex);
37579 dlci->fifo = &dlci->_fifo;
37580- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
37581+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
37582 kfree(dlci);
37583 return NULL;
37584 }
37585diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
37586index 94b6eda..15f7cec 100644
37587--- a/drivers/tty/n_tty.c
37588+++ b/drivers/tty/n_tty.c
37589@@ -2122,6 +2122,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
37590 {
37591 *ops = tty_ldisc_N_TTY;
37592 ops->owner = NULL;
37593- ops->refcount = ops->flags = 0;
37594+ atomic_set(&ops->refcount, 0);
37595+ ops->flags = 0;
37596 }
37597 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
37598diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
37599index eeae7fa..177a743 100644
37600--- a/drivers/tty/pty.c
37601+++ b/drivers/tty/pty.c
37602@@ -707,8 +707,10 @@ static void __init unix98_pty_init(void)
37603 panic("Couldn't register Unix98 pts driver");
37604
37605 /* Now create the /dev/ptmx special device */
37606+ pax_open_kernel();
37607 tty_default_fops(&ptmx_fops);
37608- ptmx_fops.open = ptmx_open;
37609+ *(void **)&ptmx_fops.open = ptmx_open;
37610+ pax_close_kernel();
37611
37612 cdev_init(&ptmx_cdev, &ptmx_fops);
37613 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
37614diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
37615index 2b42a01..32a2ed3 100644
37616--- a/drivers/tty/serial/kgdboc.c
37617+++ b/drivers/tty/serial/kgdboc.c
37618@@ -24,8 +24,9 @@
37619 #define MAX_CONFIG_LEN 40
37620
37621 static struct kgdb_io kgdboc_io_ops;
37622+static struct kgdb_io kgdboc_io_ops_console;
37623
37624-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37625+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
37626 static int configured = -1;
37627
37628 static char config[MAX_CONFIG_LEN];
37629@@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
37630 kgdboc_unregister_kbd();
37631 if (configured == 1)
37632 kgdb_unregister_io_module(&kgdboc_io_ops);
37633+ else if (configured == 2)
37634+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
37635 }
37636
37637 static int configure_kgdboc(void)
37638@@ -157,13 +160,13 @@ static int configure_kgdboc(void)
37639 int err;
37640 char *cptr = config;
37641 struct console *cons;
37642+ int is_console = 0;
37643
37644 err = kgdboc_option_setup(config);
37645 if (err || !strlen(config) || isspace(config[0]))
37646 goto noconfig;
37647
37648 err = -ENODEV;
37649- kgdboc_io_ops.is_console = 0;
37650 kgdb_tty_driver = NULL;
37651
37652 kgdboc_use_kms = 0;
37653@@ -184,7 +187,7 @@ static int configure_kgdboc(void)
37654 int idx;
37655 if (cons->device && cons->device(cons, &idx) == p &&
37656 idx == tty_line) {
37657- kgdboc_io_ops.is_console = 1;
37658+ is_console = 1;
37659 break;
37660 }
37661 cons = cons->next;
37662@@ -194,12 +197,16 @@ static int configure_kgdboc(void)
37663 kgdb_tty_line = tty_line;
37664
37665 do_register:
37666- err = kgdb_register_io_module(&kgdboc_io_ops);
37667+ if (is_console) {
37668+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
37669+ configured = 2;
37670+ } else {
37671+ err = kgdb_register_io_module(&kgdboc_io_ops);
37672+ configured = 1;
37673+ }
37674 if (err)
37675 goto noconfig;
37676
37677- configured = 1;
37678-
37679 return 0;
37680
37681 noconfig:
37682@@ -213,7 +220,7 @@ noconfig:
37683 static int __init init_kgdboc(void)
37684 {
37685 /* Already configured? */
37686- if (configured == 1)
37687+ if (configured >= 1)
37688 return 0;
37689
37690 return configure_kgdboc();
37691@@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
37692 if (config[len - 1] == '\n')
37693 config[len - 1] = '\0';
37694
37695- if (configured == 1)
37696+ if (configured >= 1)
37697 cleanup_kgdboc();
37698
37699 /* Go and configure with the new params. */
37700@@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
37701 .post_exception = kgdboc_post_exp_handler,
37702 };
37703
37704+static struct kgdb_io kgdboc_io_ops_console = {
37705+ .name = "kgdboc",
37706+ .read_char = kgdboc_get_char,
37707+ .write_char = kgdboc_put_char,
37708+ .pre_exception = kgdboc_pre_exp_handler,
37709+ .post_exception = kgdboc_post_exp_handler,
37710+ .is_console = 1
37711+};
37712+
37713 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
37714 /* This is only available if kgdboc is a built in for early debugging */
37715 static int __init kgdboc_early_init(char *opt)
37716diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
37717index 05728894..b9d44c6 100644
37718--- a/drivers/tty/sysrq.c
37719+++ b/drivers/tty/sysrq.c
37720@@ -865,7 +865,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
37721 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
37722 size_t count, loff_t *ppos)
37723 {
37724- if (count) {
37725+ if (count && capable(CAP_SYS_ADMIN)) {
37726 char c;
37727
37728 if (get_user(c, buf))
37729diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
37730index d939bd7..33d92cd 100644
37731--- a/drivers/tty/tty_io.c
37732+++ b/drivers/tty/tty_io.c
37733@@ -3278,7 +3278,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
37734
37735 void tty_default_fops(struct file_operations *fops)
37736 {
37737- *fops = tty_fops;
37738+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
37739 }
37740
37741 /*
37742diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
37743index 24b95db..9c078d0 100644
37744--- a/drivers/tty/tty_ldisc.c
37745+++ b/drivers/tty/tty_ldisc.c
37746@@ -57,7 +57,7 @@ static void put_ldisc(struct tty_ldisc *ld)
37747 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
37748 struct tty_ldisc_ops *ldo = ld->ops;
37749
37750- ldo->refcount--;
37751+ atomic_dec(&ldo->refcount);
37752 module_put(ldo->owner);
37753 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37754
37755@@ -92,7 +92,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
37756 spin_lock_irqsave(&tty_ldisc_lock, flags);
37757 tty_ldiscs[disc] = new_ldisc;
37758 new_ldisc->num = disc;
37759- new_ldisc->refcount = 0;
37760+ atomic_set(&new_ldisc->refcount, 0);
37761 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37762
37763 return ret;
37764@@ -120,7 +120,7 @@ int tty_unregister_ldisc(int disc)
37765 return -EINVAL;
37766
37767 spin_lock_irqsave(&tty_ldisc_lock, flags);
37768- if (tty_ldiscs[disc]->refcount)
37769+ if (atomic_read(&tty_ldiscs[disc]->refcount))
37770 ret = -EBUSY;
37771 else
37772 tty_ldiscs[disc] = NULL;
37773@@ -141,7 +141,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
37774 if (ldops) {
37775 ret = ERR_PTR(-EAGAIN);
37776 if (try_module_get(ldops->owner)) {
37777- ldops->refcount++;
37778+ atomic_inc(&ldops->refcount);
37779 ret = ldops;
37780 }
37781 }
37782@@ -154,7 +154,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
37783 unsigned long flags;
37784
37785 spin_lock_irqsave(&tty_ldisc_lock, flags);
37786- ldops->refcount--;
37787+ atomic_dec(&ldops->refcount);
37788 module_put(ldops->owner);
37789 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37790 }
37791diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
37792index 3b0c4e3..f98a992 100644
37793--- a/drivers/tty/vt/keyboard.c
37794+++ b/drivers/tty/vt/keyboard.c
37795@@ -663,6 +663,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
37796 kbd->kbdmode == VC_OFF) &&
37797 value != KVAL(K_SAK))
37798 return; /* SAK is allowed even in raw mode */
37799+
37800+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
37801+ {
37802+ void *func = fn_handler[value];
37803+ if (func == fn_show_state || func == fn_show_ptregs ||
37804+ func == fn_show_mem)
37805+ return;
37806+ }
37807+#endif
37808+
37809 fn_handler[value](vc);
37810 }
37811
37812@@ -1812,9 +1822,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
37813 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
37814 return -EFAULT;
37815
37816- if (!capable(CAP_SYS_TTY_CONFIG))
37817- perm = 0;
37818-
37819 switch (cmd) {
37820 case KDGKBENT:
37821 /* Ensure another thread doesn't free it under us */
37822@@ -1829,6 +1836,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
37823 spin_unlock_irqrestore(&kbd_event_lock, flags);
37824 return put_user(val, &user_kbe->kb_value);
37825 case KDSKBENT:
37826+ if (!capable(CAP_SYS_TTY_CONFIG))
37827+ perm = 0;
37828+
37829 if (!perm)
37830 return -EPERM;
37831 if (!i && v == K_NOSUCHMAP) {
37832@@ -1919,9 +1929,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
37833 int i, j, k;
37834 int ret;
37835
37836- if (!capable(CAP_SYS_TTY_CONFIG))
37837- perm = 0;
37838-
37839 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
37840 if (!kbs) {
37841 ret = -ENOMEM;
37842@@ -1955,6 +1962,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
37843 kfree(kbs);
37844 return ((p && *p) ? -EOVERFLOW : 0);
37845 case KDSKBSENT:
37846+ if (!capable(CAP_SYS_TTY_CONFIG))
37847+ perm = 0;
37848+
37849 if (!perm) {
37850 ret = -EPERM;
37851 goto reterr;
37852diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
37853index a783d53..cb30d94 100644
37854--- a/drivers/uio/uio.c
37855+++ b/drivers/uio/uio.c
37856@@ -25,6 +25,7 @@
37857 #include <linux/kobject.h>
37858 #include <linux/cdev.h>
37859 #include <linux/uio_driver.h>
37860+#include <asm/local.h>
37861
37862 #define UIO_MAX_DEVICES (1U << MINORBITS)
37863
37864@@ -32,10 +33,10 @@ struct uio_device {
37865 struct module *owner;
37866 struct device *dev;
37867 int minor;
37868- atomic_t event;
37869+ atomic_unchecked_t event;
37870 struct fasync_struct *async_queue;
37871 wait_queue_head_t wait;
37872- int vma_count;
37873+ local_t vma_count;
37874 struct uio_info *info;
37875 struct kobject *map_dir;
37876 struct kobject *portio_dir;
37877@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
37878 struct device_attribute *attr, char *buf)
37879 {
37880 struct uio_device *idev = dev_get_drvdata(dev);
37881- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
37882+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
37883 }
37884
37885 static struct device_attribute uio_class_attributes[] = {
37886@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
37887 {
37888 struct uio_device *idev = info->uio_dev;
37889
37890- atomic_inc(&idev->event);
37891+ atomic_inc_unchecked(&idev->event);
37892 wake_up_interruptible(&idev->wait);
37893 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
37894 }
37895@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
37896 }
37897
37898 listener->dev = idev;
37899- listener->event_count = atomic_read(&idev->event);
37900+ listener->event_count = atomic_read_unchecked(&idev->event);
37901 filep->private_data = listener;
37902
37903 if (idev->info->open) {
37904@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
37905 return -EIO;
37906
37907 poll_wait(filep, &idev->wait, wait);
37908- if (listener->event_count != atomic_read(&idev->event))
37909+ if (listener->event_count != atomic_read_unchecked(&idev->event))
37910 return POLLIN | POLLRDNORM;
37911 return 0;
37912 }
37913@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
37914 do {
37915 set_current_state(TASK_INTERRUPTIBLE);
37916
37917- event_count = atomic_read(&idev->event);
37918+ event_count = atomic_read_unchecked(&idev->event);
37919 if (event_count != listener->event_count) {
37920 if (copy_to_user(buf, &event_count, count))
37921 retval = -EFAULT;
37922@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
37923 static void uio_vma_open(struct vm_area_struct *vma)
37924 {
37925 struct uio_device *idev = vma->vm_private_data;
37926- idev->vma_count++;
37927+ local_inc(&idev->vma_count);
37928 }
37929
37930 static void uio_vma_close(struct vm_area_struct *vma)
37931 {
37932 struct uio_device *idev = vma->vm_private_data;
37933- idev->vma_count--;
37934+ local_dec(&idev->vma_count);
37935 }
37936
37937 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
37938@@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
37939 idev->owner = owner;
37940 idev->info = info;
37941 init_waitqueue_head(&idev->wait);
37942- atomic_set(&idev->event, 0);
37943+ atomic_set_unchecked(&idev->event, 0);
37944
37945 ret = uio_get_minor(idev);
37946 if (ret)
37947diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
37948index 98b89fe..aff824e 100644
37949--- a/drivers/usb/atm/cxacru.c
37950+++ b/drivers/usb/atm/cxacru.c
37951@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
37952 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
37953 if (ret < 2)
37954 return -EINVAL;
37955- if (index < 0 || index > 0x7f)
37956+ if (index > 0x7f)
37957 return -EINVAL;
37958 pos += tmp;
37959
37960diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
37961index d3448ca..d2864ca 100644
37962--- a/drivers/usb/atm/usbatm.c
37963+++ b/drivers/usb/atm/usbatm.c
37964@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37965 if (printk_ratelimit())
37966 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
37967 __func__, vpi, vci);
37968- atomic_inc(&vcc->stats->rx_err);
37969+ atomic_inc_unchecked(&vcc->stats->rx_err);
37970 return;
37971 }
37972
37973@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37974 if (length > ATM_MAX_AAL5_PDU) {
37975 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
37976 __func__, length, vcc);
37977- atomic_inc(&vcc->stats->rx_err);
37978+ atomic_inc_unchecked(&vcc->stats->rx_err);
37979 goto out;
37980 }
37981
37982@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37983 if (sarb->len < pdu_length) {
37984 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
37985 __func__, pdu_length, sarb->len, vcc);
37986- atomic_inc(&vcc->stats->rx_err);
37987+ atomic_inc_unchecked(&vcc->stats->rx_err);
37988 goto out;
37989 }
37990
37991 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
37992 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
37993 __func__, vcc);
37994- atomic_inc(&vcc->stats->rx_err);
37995+ atomic_inc_unchecked(&vcc->stats->rx_err);
37996 goto out;
37997 }
37998
37999@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38000 if (printk_ratelimit())
38001 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38002 __func__, length);
38003- atomic_inc(&vcc->stats->rx_drop);
38004+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38005 goto out;
38006 }
38007
38008@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38009
38010 vcc->push(vcc, skb);
38011
38012- atomic_inc(&vcc->stats->rx);
38013+ atomic_inc_unchecked(&vcc->stats->rx);
38014 out:
38015 skb_trim(sarb, 0);
38016 }
38017@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
38018 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38019
38020 usbatm_pop(vcc, skb);
38021- atomic_inc(&vcc->stats->tx);
38022+ atomic_inc_unchecked(&vcc->stats->tx);
38023
38024 skb = skb_dequeue(&instance->sndqueue);
38025 }
38026@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
38027 if (!left--)
38028 return sprintf(page,
38029 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38030- atomic_read(&atm_dev->stats.aal5.tx),
38031- atomic_read(&atm_dev->stats.aal5.tx_err),
38032- atomic_read(&atm_dev->stats.aal5.rx),
38033- atomic_read(&atm_dev->stats.aal5.rx_err),
38034- atomic_read(&atm_dev->stats.aal5.rx_drop));
38035+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38036+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38037+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38038+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38039+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38040
38041 if (!left--) {
38042 if (instance->disconnected)
38043diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
38044index d956965..4179a77 100644
38045--- a/drivers/usb/core/devices.c
38046+++ b/drivers/usb/core/devices.c
38047@@ -126,7 +126,7 @@ static const char format_endpt[] =
38048 * time it gets called.
38049 */
38050 static struct device_connect_event {
38051- atomic_t count;
38052+ atomic_unchecked_t count;
38053 wait_queue_head_t wait;
38054 } device_event = {
38055 .count = ATOMIC_INIT(1),
38056@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
38057
38058 void usbfs_conn_disc_event(void)
38059 {
38060- atomic_add(2, &device_event.count);
38061+ atomic_add_unchecked(2, &device_event.count);
38062 wake_up(&device_event.wait);
38063 }
38064
38065@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
38066
38067 poll_wait(file, &device_event.wait, wait);
38068
38069- event_count = atomic_read(&device_event.count);
38070+ event_count = atomic_read_unchecked(&device_event.count);
38071 if (file->f_version != event_count) {
38072 file->f_version = event_count;
38073 return POLLIN | POLLRDNORM;
38074diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
38075index 1fc8f12..20647c1 100644
38076--- a/drivers/usb/early/ehci-dbgp.c
38077+++ b/drivers/usb/early/ehci-dbgp.c
38078@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
38079
38080 #ifdef CONFIG_KGDB
38081 static struct kgdb_io kgdbdbgp_io_ops;
38082-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
38083+static struct kgdb_io kgdbdbgp_io_ops_console;
38084+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
38085 #else
38086 #define dbgp_kgdb_mode (0)
38087 #endif
38088@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
38089 .write_char = kgdbdbgp_write_char,
38090 };
38091
38092+static struct kgdb_io kgdbdbgp_io_ops_console = {
38093+ .name = "kgdbdbgp",
38094+ .read_char = kgdbdbgp_read_char,
38095+ .write_char = kgdbdbgp_write_char,
38096+ .is_console = 1
38097+};
38098+
38099 static int kgdbdbgp_wait_time;
38100
38101 static int __init kgdbdbgp_parse_config(char *str)
38102@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
38103 ptr++;
38104 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
38105 }
38106- kgdb_register_io_module(&kgdbdbgp_io_ops);
38107- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
38108+ if (early_dbgp_console.index != -1)
38109+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
38110+ else
38111+ kgdb_register_io_module(&kgdbdbgp_io_ops);
38112
38113 return 0;
38114 }
38115diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
38116index d6bea3e..60b250e 100644
38117--- a/drivers/usb/wusbcore/wa-hc.h
38118+++ b/drivers/usb/wusbcore/wa-hc.h
38119@@ -192,7 +192,7 @@ struct wahc {
38120 struct list_head xfer_delayed_list;
38121 spinlock_t xfer_list_lock;
38122 struct work_struct xfer_work;
38123- atomic_t xfer_id_count;
38124+ atomic_unchecked_t xfer_id_count;
38125 };
38126
38127
38128@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
38129 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38130 spin_lock_init(&wa->xfer_list_lock);
38131 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38132- atomic_set(&wa->xfer_id_count, 1);
38133+ atomic_set_unchecked(&wa->xfer_id_count, 1);
38134 }
38135
38136 /**
38137diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
38138index 57c01ab..8a05959 100644
38139--- a/drivers/usb/wusbcore/wa-xfer.c
38140+++ b/drivers/usb/wusbcore/wa-xfer.c
38141@@ -296,7 +296,7 @@ out:
38142 */
38143 static void wa_xfer_id_init(struct wa_xfer *xfer)
38144 {
38145- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38146+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38147 }
38148
38149 /*
38150diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
38151index 51e4c1e..9d87e2a 100644
38152--- a/drivers/vhost/vhost.c
38153+++ b/drivers/vhost/vhost.c
38154@@ -632,7 +632,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
38155 return 0;
38156 }
38157
38158-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
38159+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
38160 {
38161 struct file *eventfp, *filep = NULL,
38162 *pollstart = NULL, *pollstop = NULL;
38163diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
38164index b0b2ac3..89a4399 100644
38165--- a/drivers/video/aty/aty128fb.c
38166+++ b/drivers/video/aty/aty128fb.c
38167@@ -148,7 +148,7 @@ enum {
38168 };
38169
38170 /* Must match above enum */
38171-static const char *r128_family[] __devinitdata = {
38172+static const char *r128_family[] __devinitconst = {
38173 "AGP",
38174 "PCI",
38175 "PRO AGP",
38176diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
38177index 5c3960d..15cf8fc 100644
38178--- a/drivers/video/fbcmap.c
38179+++ b/drivers/video/fbcmap.c
38180@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
38181 rc = -ENODEV;
38182 goto out;
38183 }
38184- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38185- !info->fbops->fb_setcmap)) {
38186+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38187 rc = -EINVAL;
38188 goto out1;
38189 }
38190diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
38191index c6ce416..3b9b642 100644
38192--- a/drivers/video/fbmem.c
38193+++ b/drivers/video/fbmem.c
38194@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38195 image->dx += image->width + 8;
38196 }
38197 } else if (rotate == FB_ROTATE_UD) {
38198- for (x = 0; x < num && image->dx >= 0; x++) {
38199+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38200 info->fbops->fb_imageblit(info, image);
38201 image->dx -= image->width + 8;
38202 }
38203@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38204 image->dy += image->height + 8;
38205 }
38206 } else if (rotate == FB_ROTATE_CCW) {
38207- for (x = 0; x < num && image->dy >= 0; x++) {
38208+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38209 info->fbops->fb_imageblit(info, image);
38210 image->dy -= image->height + 8;
38211 }
38212@@ -1157,7 +1157,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
38213 return -EFAULT;
38214 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38215 return -EINVAL;
38216- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38217+ if (con2fb.framebuffer >= FB_MAX)
38218 return -EINVAL;
38219 if (!registered_fb[con2fb.framebuffer])
38220 request_module("fb%d", con2fb.framebuffer);
38221diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
38222index 5a5d092..265c5ed 100644
38223--- a/drivers/video/geode/gx1fb_core.c
38224+++ b/drivers/video/geode/gx1fb_core.c
38225@@ -29,7 +29,7 @@ static int crt_option = 1;
38226 static char panel_option[32] = "";
38227
38228 /* Modes relevant to the GX1 (taken from modedb.c) */
38229-static const struct fb_videomode __devinitdata gx1_modedb[] = {
38230+static const struct fb_videomode __devinitconst gx1_modedb[] = {
38231 /* 640x480-60 VESA */
38232 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
38233 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
38234diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
38235index 0fad23f..0e9afa4 100644
38236--- a/drivers/video/gxt4500.c
38237+++ b/drivers/video/gxt4500.c
38238@@ -156,7 +156,7 @@ struct gxt4500_par {
38239 static char *mode_option;
38240
38241 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
38242-static const struct fb_videomode defaultmode __devinitdata = {
38243+static const struct fb_videomode defaultmode __devinitconst = {
38244 .refresh = 60,
38245 .xres = 1280,
38246 .yres = 1024,
38247@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
38248 return 0;
38249 }
38250
38251-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
38252+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
38253 .id = "IBM GXT4500P",
38254 .type = FB_TYPE_PACKED_PIXELS,
38255 .visual = FB_VISUAL_PSEUDOCOLOR,
38256diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
38257index 7672d2e..b56437f 100644
38258--- a/drivers/video/i810/i810_accel.c
38259+++ b/drivers/video/i810/i810_accel.c
38260@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
38261 }
38262 }
38263 printk("ringbuffer lockup!!!\n");
38264+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38265 i810_report_error(mmio);
38266 par->dev_flags |= LOCKUP;
38267 info->pixmap.scan_align = 1;
38268diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
38269index b83f361..2b05a91 100644
38270--- a/drivers/video/i810/i810_main.c
38271+++ b/drivers/video/i810/i810_main.c
38272@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
38273 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
38274
38275 /* PCI */
38276-static const char *i810_pci_list[] __devinitdata = {
38277+static const char *i810_pci_list[] __devinitconst = {
38278 "Intel(R) 810 Framebuffer Device" ,
38279 "Intel(R) 810-DC100 Framebuffer Device" ,
38280 "Intel(R) 810E Framebuffer Device" ,
38281diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
38282index de36693..3c63fc2 100644
38283--- a/drivers/video/jz4740_fb.c
38284+++ b/drivers/video/jz4740_fb.c
38285@@ -136,7 +136,7 @@ struct jzfb {
38286 uint32_t pseudo_palette[16];
38287 };
38288
38289-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
38290+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
38291 .id = "JZ4740 FB",
38292 .type = FB_TYPE_PACKED_PIXELS,
38293 .visual = FB_VISUAL_TRUECOLOR,
38294diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
38295index 3c14e43..eafa544 100644
38296--- a/drivers/video/logo/logo_linux_clut224.ppm
38297+++ b/drivers/video/logo/logo_linux_clut224.ppm
38298@@ -1,1604 +1,1123 @@
38299 P3
38300-# Standard 224-color Linux logo
38301 80 80
38302 255
38303- 0 0 0 0 0 0 0 0 0 0 0 0
38304- 0 0 0 0 0 0 0 0 0 0 0 0
38305- 0 0 0 0 0 0 0 0 0 0 0 0
38306- 0 0 0 0 0 0 0 0 0 0 0 0
38307- 0 0 0 0 0 0 0 0 0 0 0 0
38308- 0 0 0 0 0 0 0 0 0 0 0 0
38309- 0 0 0 0 0 0 0 0 0 0 0 0
38310- 0 0 0 0 0 0 0 0 0 0 0 0
38311- 0 0 0 0 0 0 0 0 0 0 0 0
38312- 6 6 6 6 6 6 10 10 10 10 10 10
38313- 10 10 10 6 6 6 6 6 6 6 6 6
38314- 0 0 0 0 0 0 0 0 0 0 0 0
38315- 0 0 0 0 0 0 0 0 0 0 0 0
38316- 0 0 0 0 0 0 0 0 0 0 0 0
38317- 0 0 0 0 0 0 0 0 0 0 0 0
38318- 0 0 0 0 0 0 0 0 0 0 0 0
38319- 0 0 0 0 0 0 0 0 0 0 0 0
38320- 0 0 0 0 0 0 0 0 0 0 0 0
38321- 0 0 0 0 0 0 0 0 0 0 0 0
38322- 0 0 0 0 0 0 0 0 0 0 0 0
38323- 0 0 0 0 0 0 0 0 0 0 0 0
38324- 0 0 0 0 0 0 0 0 0 0 0 0
38325- 0 0 0 0 0 0 0 0 0 0 0 0
38326- 0 0 0 0 0 0 0 0 0 0 0 0
38327- 0 0 0 0 0 0 0 0 0 0 0 0
38328- 0 0 0 0 0 0 0 0 0 0 0 0
38329- 0 0 0 0 0 0 0 0 0 0 0 0
38330- 0 0 0 0 0 0 0 0 0 0 0 0
38331- 0 0 0 6 6 6 10 10 10 14 14 14
38332- 22 22 22 26 26 26 30 30 30 34 34 34
38333- 30 30 30 30 30 30 26 26 26 18 18 18
38334- 14 14 14 10 10 10 6 6 6 0 0 0
38335- 0 0 0 0 0 0 0 0 0 0 0 0
38336- 0 0 0 0 0 0 0 0 0 0 0 0
38337- 0 0 0 0 0 0 0 0 0 0 0 0
38338- 0 0 0 0 0 0 0 0 0 0 0 0
38339- 0 0 0 0 0 0 0 0 0 0 0 0
38340- 0 0 0 0 0 0 0 0 0 0 0 0
38341- 0 0 0 0 0 0 0 0 0 0 0 0
38342- 0 0 0 0 0 0 0 0 0 0 0 0
38343- 0 0 0 0 0 0 0 0 0 0 0 0
38344- 0 0 0 0 0 1 0 0 1 0 0 0
38345- 0 0 0 0 0 0 0 0 0 0 0 0
38346- 0 0 0 0 0 0 0 0 0 0 0 0
38347- 0 0 0 0 0 0 0 0 0 0 0 0
38348- 0 0 0 0 0 0 0 0 0 0 0 0
38349- 0 0 0 0 0 0 0 0 0 0 0 0
38350- 0 0 0 0 0 0 0 0 0 0 0 0
38351- 6 6 6 14 14 14 26 26 26 42 42 42
38352- 54 54 54 66 66 66 78 78 78 78 78 78
38353- 78 78 78 74 74 74 66 66 66 54 54 54
38354- 42 42 42 26 26 26 18 18 18 10 10 10
38355- 6 6 6 0 0 0 0 0 0 0 0 0
38356- 0 0 0 0 0 0 0 0 0 0 0 0
38357- 0 0 0 0 0 0 0 0 0 0 0 0
38358- 0 0 0 0 0 0 0 0 0 0 0 0
38359- 0 0 0 0 0 0 0 0 0 0 0 0
38360- 0 0 0 0 0 0 0 0 0 0 0 0
38361- 0 0 0 0 0 0 0 0 0 0 0 0
38362- 0 0 0 0 0 0 0 0 0 0 0 0
38363- 0 0 0 0 0 0 0 0 0 0 0 0
38364- 0 0 1 0 0 0 0 0 0 0 0 0
38365- 0 0 0 0 0 0 0 0 0 0 0 0
38366- 0 0 0 0 0 0 0 0 0 0 0 0
38367- 0 0 0 0 0 0 0 0 0 0 0 0
38368- 0 0 0 0 0 0 0 0 0 0 0 0
38369- 0 0 0 0 0 0 0 0 0 0 0 0
38370- 0 0 0 0 0 0 0 0 0 10 10 10
38371- 22 22 22 42 42 42 66 66 66 86 86 86
38372- 66 66 66 38 38 38 38 38 38 22 22 22
38373- 26 26 26 34 34 34 54 54 54 66 66 66
38374- 86 86 86 70 70 70 46 46 46 26 26 26
38375- 14 14 14 6 6 6 0 0 0 0 0 0
38376- 0 0 0 0 0 0 0 0 0 0 0 0
38377- 0 0 0 0 0 0 0 0 0 0 0 0
38378- 0 0 0 0 0 0 0 0 0 0 0 0
38379- 0 0 0 0 0 0 0 0 0 0 0 0
38380- 0 0 0 0 0 0 0 0 0 0 0 0
38381- 0 0 0 0 0 0 0 0 0 0 0 0
38382- 0 0 0 0 0 0 0 0 0 0 0 0
38383- 0 0 0 0 0 0 0 0 0 0 0 0
38384- 0 0 1 0 0 1 0 0 1 0 0 0
38385- 0 0 0 0 0 0 0 0 0 0 0 0
38386- 0 0 0 0 0 0 0 0 0 0 0 0
38387- 0 0 0 0 0 0 0 0 0 0 0 0
38388- 0 0 0 0 0 0 0 0 0 0 0 0
38389- 0 0 0 0 0 0 0 0 0 0 0 0
38390- 0 0 0 0 0 0 10 10 10 26 26 26
38391- 50 50 50 82 82 82 58 58 58 6 6 6
38392- 2 2 6 2 2 6 2 2 6 2 2 6
38393- 2 2 6 2 2 6 2 2 6 2 2 6
38394- 6 6 6 54 54 54 86 86 86 66 66 66
38395- 38 38 38 18 18 18 6 6 6 0 0 0
38396- 0 0 0 0 0 0 0 0 0 0 0 0
38397- 0 0 0 0 0 0 0 0 0 0 0 0
38398- 0 0 0 0 0 0 0 0 0 0 0 0
38399- 0 0 0 0 0 0 0 0 0 0 0 0
38400- 0 0 0 0 0 0 0 0 0 0 0 0
38401- 0 0 0 0 0 0 0 0 0 0 0 0
38402- 0 0 0 0 0 0 0 0 0 0 0 0
38403- 0 0 0 0 0 0 0 0 0 0 0 0
38404- 0 0 0 0 0 0 0 0 0 0 0 0
38405- 0 0 0 0 0 0 0 0 0 0 0 0
38406- 0 0 0 0 0 0 0 0 0 0 0 0
38407- 0 0 0 0 0 0 0 0 0 0 0 0
38408- 0 0 0 0 0 0 0 0 0 0 0 0
38409- 0 0 0 0 0 0 0 0 0 0 0 0
38410- 0 0 0 6 6 6 22 22 22 50 50 50
38411- 78 78 78 34 34 34 2 2 6 2 2 6
38412- 2 2 6 2 2 6 2 2 6 2 2 6
38413- 2 2 6 2 2 6 2 2 6 2 2 6
38414- 2 2 6 2 2 6 6 6 6 70 70 70
38415- 78 78 78 46 46 46 22 22 22 6 6 6
38416- 0 0 0 0 0 0 0 0 0 0 0 0
38417- 0 0 0 0 0 0 0 0 0 0 0 0
38418- 0 0 0 0 0 0 0 0 0 0 0 0
38419- 0 0 0 0 0 0 0 0 0 0 0 0
38420- 0 0 0 0 0 0 0 0 0 0 0 0
38421- 0 0 0 0 0 0 0 0 0 0 0 0
38422- 0 0 0 0 0 0 0 0 0 0 0 0
38423- 0 0 0 0 0 0 0 0 0 0 0 0
38424- 0 0 1 0 0 1 0 0 1 0 0 0
38425- 0 0 0 0 0 0 0 0 0 0 0 0
38426- 0 0 0 0 0 0 0 0 0 0 0 0
38427- 0 0 0 0 0 0 0 0 0 0 0 0
38428- 0 0 0 0 0 0 0 0 0 0 0 0
38429- 0 0 0 0 0 0 0 0 0 0 0 0
38430- 6 6 6 18 18 18 42 42 42 82 82 82
38431- 26 26 26 2 2 6 2 2 6 2 2 6
38432- 2 2 6 2 2 6 2 2 6 2 2 6
38433- 2 2 6 2 2 6 2 2 6 14 14 14
38434- 46 46 46 34 34 34 6 6 6 2 2 6
38435- 42 42 42 78 78 78 42 42 42 18 18 18
38436- 6 6 6 0 0 0 0 0 0 0 0 0
38437- 0 0 0 0 0 0 0 0 0 0 0 0
38438- 0 0 0 0 0 0 0 0 0 0 0 0
38439- 0 0 0 0 0 0 0 0 0 0 0 0
38440- 0 0 0 0 0 0 0 0 0 0 0 0
38441- 0 0 0 0 0 0 0 0 0 0 0 0
38442- 0 0 0 0 0 0 0 0 0 0 0 0
38443- 0 0 0 0 0 0 0 0 0 0 0 0
38444- 0 0 1 0 0 0 0 0 1 0 0 0
38445- 0 0 0 0 0 0 0 0 0 0 0 0
38446- 0 0 0 0 0 0 0 0 0 0 0 0
38447- 0 0 0 0 0 0 0 0 0 0 0 0
38448- 0 0 0 0 0 0 0 0 0 0 0 0
38449- 0 0 0 0 0 0 0 0 0 0 0 0
38450- 10 10 10 30 30 30 66 66 66 58 58 58
38451- 2 2 6 2 2 6 2 2 6 2 2 6
38452- 2 2 6 2 2 6 2 2 6 2 2 6
38453- 2 2 6 2 2 6 2 2 6 26 26 26
38454- 86 86 86 101 101 101 46 46 46 10 10 10
38455- 2 2 6 58 58 58 70 70 70 34 34 34
38456- 10 10 10 0 0 0 0 0 0 0 0 0
38457- 0 0 0 0 0 0 0 0 0 0 0 0
38458- 0 0 0 0 0 0 0 0 0 0 0 0
38459- 0 0 0 0 0 0 0 0 0 0 0 0
38460- 0 0 0 0 0 0 0 0 0 0 0 0
38461- 0 0 0 0 0 0 0 0 0 0 0 0
38462- 0 0 0 0 0 0 0 0 0 0 0 0
38463- 0 0 0 0 0 0 0 0 0 0 0 0
38464- 0 0 1 0 0 1 0 0 1 0 0 0
38465- 0 0 0 0 0 0 0 0 0 0 0 0
38466- 0 0 0 0 0 0 0 0 0 0 0 0
38467- 0 0 0 0 0 0 0 0 0 0 0 0
38468- 0 0 0 0 0 0 0 0 0 0 0 0
38469- 0 0 0 0 0 0 0 0 0 0 0 0
38470- 14 14 14 42 42 42 86 86 86 10 10 10
38471- 2 2 6 2 2 6 2 2 6 2 2 6
38472- 2 2 6 2 2 6 2 2 6 2 2 6
38473- 2 2 6 2 2 6 2 2 6 30 30 30
38474- 94 94 94 94 94 94 58 58 58 26 26 26
38475- 2 2 6 6 6 6 78 78 78 54 54 54
38476- 22 22 22 6 6 6 0 0 0 0 0 0
38477- 0 0 0 0 0 0 0 0 0 0 0 0
38478- 0 0 0 0 0 0 0 0 0 0 0 0
38479- 0 0 0 0 0 0 0 0 0 0 0 0
38480- 0 0 0 0 0 0 0 0 0 0 0 0
38481- 0 0 0 0 0 0 0 0 0 0 0 0
38482- 0 0 0 0 0 0 0 0 0 0 0 0
38483- 0 0 0 0 0 0 0 0 0 0 0 0
38484- 0 0 0 0 0 0 0 0 0 0 0 0
38485- 0 0 0 0 0 0 0 0 0 0 0 0
38486- 0 0 0 0 0 0 0 0 0 0 0 0
38487- 0 0 0 0 0 0 0 0 0 0 0 0
38488- 0 0 0 0 0 0 0 0 0 0 0 0
38489- 0 0 0 0 0 0 0 0 0 6 6 6
38490- 22 22 22 62 62 62 62 62 62 2 2 6
38491- 2 2 6 2 2 6 2 2 6 2 2 6
38492- 2 2 6 2 2 6 2 2 6 2 2 6
38493- 2 2 6 2 2 6 2 2 6 26 26 26
38494- 54 54 54 38 38 38 18 18 18 10 10 10
38495- 2 2 6 2 2 6 34 34 34 82 82 82
38496- 38 38 38 14 14 14 0 0 0 0 0 0
38497- 0 0 0 0 0 0 0 0 0 0 0 0
38498- 0 0 0 0 0 0 0 0 0 0 0 0
38499- 0 0 0 0 0 0 0 0 0 0 0 0
38500- 0 0 0 0 0 0 0 0 0 0 0 0
38501- 0 0 0 0 0 0 0 0 0 0 0 0
38502- 0 0 0 0 0 0 0 0 0 0 0 0
38503- 0 0 0 0 0 0 0 0 0 0 0 0
38504- 0 0 0 0 0 1 0 0 1 0 0 0
38505- 0 0 0 0 0 0 0 0 0 0 0 0
38506- 0 0 0 0 0 0 0 0 0 0 0 0
38507- 0 0 0 0 0 0 0 0 0 0 0 0
38508- 0 0 0 0 0 0 0 0 0 0 0 0
38509- 0 0 0 0 0 0 0 0 0 6 6 6
38510- 30 30 30 78 78 78 30 30 30 2 2 6
38511- 2 2 6 2 2 6 2 2 6 2 2 6
38512- 2 2 6 2 2 6 2 2 6 2 2 6
38513- 2 2 6 2 2 6 2 2 6 10 10 10
38514- 10 10 10 2 2 6 2 2 6 2 2 6
38515- 2 2 6 2 2 6 2 2 6 78 78 78
38516- 50 50 50 18 18 18 6 6 6 0 0 0
38517- 0 0 0 0 0 0 0 0 0 0 0 0
38518- 0 0 0 0 0 0 0 0 0 0 0 0
38519- 0 0 0 0 0 0 0 0 0 0 0 0
38520- 0 0 0 0 0 0 0 0 0 0 0 0
38521- 0 0 0 0 0 0 0 0 0 0 0 0
38522- 0 0 0 0 0 0 0 0 0 0 0 0
38523- 0 0 0 0 0 0 0 0 0 0 0 0
38524- 0 0 1 0 0 0 0 0 0 0 0 0
38525- 0 0 0 0 0 0 0 0 0 0 0 0
38526- 0 0 0 0 0 0 0 0 0 0 0 0
38527- 0 0 0 0 0 0 0 0 0 0 0 0
38528- 0 0 0 0 0 0 0 0 0 0 0 0
38529- 0 0 0 0 0 0 0 0 0 10 10 10
38530- 38 38 38 86 86 86 14 14 14 2 2 6
38531- 2 2 6 2 2 6 2 2 6 2 2 6
38532- 2 2 6 2 2 6 2 2 6 2 2 6
38533- 2 2 6 2 2 6 2 2 6 2 2 6
38534- 2 2 6 2 2 6 2 2 6 2 2 6
38535- 2 2 6 2 2 6 2 2 6 54 54 54
38536- 66 66 66 26 26 26 6 6 6 0 0 0
38537- 0 0 0 0 0 0 0 0 0 0 0 0
38538- 0 0 0 0 0 0 0 0 0 0 0 0
38539- 0 0 0 0 0 0 0 0 0 0 0 0
38540- 0 0 0 0 0 0 0 0 0 0 0 0
38541- 0 0 0 0 0 0 0 0 0 0 0 0
38542- 0 0 0 0 0 0 0 0 0 0 0 0
38543- 0 0 0 0 0 0 0 0 0 0 0 0
38544- 0 0 0 0 0 1 0 0 1 0 0 0
38545- 0 0 0 0 0 0 0 0 0 0 0 0
38546- 0 0 0 0 0 0 0 0 0 0 0 0
38547- 0 0 0 0 0 0 0 0 0 0 0 0
38548- 0 0 0 0 0 0 0 0 0 0 0 0
38549- 0 0 0 0 0 0 0 0 0 14 14 14
38550- 42 42 42 82 82 82 2 2 6 2 2 6
38551- 2 2 6 6 6 6 10 10 10 2 2 6
38552- 2 2 6 2 2 6 2 2 6 2 2 6
38553- 2 2 6 2 2 6 2 2 6 6 6 6
38554- 14 14 14 10 10 10 2 2 6 2 2 6
38555- 2 2 6 2 2 6 2 2 6 18 18 18
38556- 82 82 82 34 34 34 10 10 10 0 0 0
38557- 0 0 0 0 0 0 0 0 0 0 0 0
38558- 0 0 0 0 0 0 0 0 0 0 0 0
38559- 0 0 0 0 0 0 0 0 0 0 0 0
38560- 0 0 0 0 0 0 0 0 0 0 0 0
38561- 0 0 0 0 0 0 0 0 0 0 0 0
38562- 0 0 0 0 0 0 0 0 0 0 0 0
38563- 0 0 0 0 0 0 0 0 0 0 0 0
38564- 0 0 1 0 0 0 0 0 0 0 0 0
38565- 0 0 0 0 0 0 0 0 0 0 0 0
38566- 0 0 0 0 0 0 0 0 0 0 0 0
38567- 0 0 0 0 0 0 0 0 0 0 0 0
38568- 0 0 0 0 0 0 0 0 0 0 0 0
38569- 0 0 0 0 0 0 0 0 0 14 14 14
38570- 46 46 46 86 86 86 2 2 6 2 2 6
38571- 6 6 6 6 6 6 22 22 22 34 34 34
38572- 6 6 6 2 2 6 2 2 6 2 2 6
38573- 2 2 6 2 2 6 18 18 18 34 34 34
38574- 10 10 10 50 50 50 22 22 22 2 2 6
38575- 2 2 6 2 2 6 2 2 6 10 10 10
38576- 86 86 86 42 42 42 14 14 14 0 0 0
38577- 0 0 0 0 0 0 0 0 0 0 0 0
38578- 0 0 0 0 0 0 0 0 0 0 0 0
38579- 0 0 0 0 0 0 0 0 0 0 0 0
38580- 0 0 0 0 0 0 0 0 0 0 0 0
38581- 0 0 0 0 0 0 0 0 0 0 0 0
38582- 0 0 0 0 0 0 0 0 0 0 0 0
38583- 0 0 0 0 0 0 0 0 0 0 0 0
38584- 0 0 1 0 0 1 0 0 1 0 0 0
38585- 0 0 0 0 0 0 0 0 0 0 0 0
38586- 0 0 0 0 0 0 0 0 0 0 0 0
38587- 0 0 0 0 0 0 0 0 0 0 0 0
38588- 0 0 0 0 0 0 0 0 0 0 0 0
38589- 0 0 0 0 0 0 0 0 0 14 14 14
38590- 46 46 46 86 86 86 2 2 6 2 2 6
38591- 38 38 38 116 116 116 94 94 94 22 22 22
38592- 22 22 22 2 2 6 2 2 6 2 2 6
38593- 14 14 14 86 86 86 138 138 138 162 162 162
38594-154 154 154 38 38 38 26 26 26 6 6 6
38595- 2 2 6 2 2 6 2 2 6 2 2 6
38596- 86 86 86 46 46 46 14 14 14 0 0 0
38597- 0 0 0 0 0 0 0 0 0 0 0 0
38598- 0 0 0 0 0 0 0 0 0 0 0 0
38599- 0 0 0 0 0 0 0 0 0 0 0 0
38600- 0 0 0 0 0 0 0 0 0 0 0 0
38601- 0 0 0 0 0 0 0 0 0 0 0 0
38602- 0 0 0 0 0 0 0 0 0 0 0 0
38603- 0 0 0 0 0 0 0 0 0 0 0 0
38604- 0 0 0 0 0 0 0 0 0 0 0 0
38605- 0 0 0 0 0 0 0 0 0 0 0 0
38606- 0 0 0 0 0 0 0 0 0 0 0 0
38607- 0 0 0 0 0 0 0 0 0 0 0 0
38608- 0 0 0 0 0 0 0 0 0 0 0 0
38609- 0 0 0 0 0 0 0 0 0 14 14 14
38610- 46 46 46 86 86 86 2 2 6 14 14 14
38611-134 134 134 198 198 198 195 195 195 116 116 116
38612- 10 10 10 2 2 6 2 2 6 6 6 6
38613-101 98 89 187 187 187 210 210 210 218 218 218
38614-214 214 214 134 134 134 14 14 14 6 6 6
38615- 2 2 6 2 2 6 2 2 6 2 2 6
38616- 86 86 86 50 50 50 18 18 18 6 6 6
38617- 0 0 0 0 0 0 0 0 0 0 0 0
38618- 0 0 0 0 0 0 0 0 0 0 0 0
38619- 0 0 0 0 0 0 0 0 0 0 0 0
38620- 0 0 0 0 0 0 0 0 0 0 0 0
38621- 0 0 0 0 0 0 0 0 0 0 0 0
38622- 0 0 0 0 0 0 0 0 0 0 0 0
38623- 0 0 0 0 0 0 0 0 1 0 0 0
38624- 0 0 1 0 0 1 0 0 1 0 0 0
38625- 0 0 0 0 0 0 0 0 0 0 0 0
38626- 0 0 0 0 0 0 0 0 0 0 0 0
38627- 0 0 0 0 0 0 0 0 0 0 0 0
38628- 0 0 0 0 0 0 0 0 0 0 0 0
38629- 0 0 0 0 0 0 0 0 0 14 14 14
38630- 46 46 46 86 86 86 2 2 6 54 54 54
38631-218 218 218 195 195 195 226 226 226 246 246 246
38632- 58 58 58 2 2 6 2 2 6 30 30 30
38633-210 210 210 253 253 253 174 174 174 123 123 123
38634-221 221 221 234 234 234 74 74 74 2 2 6
38635- 2 2 6 2 2 6 2 2 6 2 2 6
38636- 70 70 70 58 58 58 22 22 22 6 6 6
38637- 0 0 0 0 0 0 0 0 0 0 0 0
38638- 0 0 0 0 0 0 0 0 0 0 0 0
38639- 0 0 0 0 0 0 0 0 0 0 0 0
38640- 0 0 0 0 0 0 0 0 0 0 0 0
38641- 0 0 0 0 0 0 0 0 0 0 0 0
38642- 0 0 0 0 0 0 0 0 0 0 0 0
38643- 0 0 0 0 0 0 0 0 0 0 0 0
38644- 0 0 0 0 0 0 0 0 0 0 0 0
38645- 0 0 0 0 0 0 0 0 0 0 0 0
38646- 0 0 0 0 0 0 0 0 0 0 0 0
38647- 0 0 0 0 0 0 0 0 0 0 0 0
38648- 0 0 0 0 0 0 0 0 0 0 0 0
38649- 0 0 0 0 0 0 0 0 0 14 14 14
38650- 46 46 46 82 82 82 2 2 6 106 106 106
38651-170 170 170 26 26 26 86 86 86 226 226 226
38652-123 123 123 10 10 10 14 14 14 46 46 46
38653-231 231 231 190 190 190 6 6 6 70 70 70
38654- 90 90 90 238 238 238 158 158 158 2 2 6
38655- 2 2 6 2 2 6 2 2 6 2 2 6
38656- 70 70 70 58 58 58 22 22 22 6 6 6
38657- 0 0 0 0 0 0 0 0 0 0 0 0
38658- 0 0 0 0 0 0 0 0 0 0 0 0
38659- 0 0 0 0 0 0 0 0 0 0 0 0
38660- 0 0 0 0 0 0 0 0 0 0 0 0
38661- 0 0 0 0 0 0 0 0 0 0 0 0
38662- 0 0 0 0 0 0 0 0 0 0 0 0
38663- 0 0 0 0 0 0 0 0 1 0 0 0
38664- 0 0 1 0 0 1 0 0 1 0 0 0
38665- 0 0 0 0 0 0 0 0 0 0 0 0
38666- 0 0 0 0 0 0 0 0 0 0 0 0
38667- 0 0 0 0 0 0 0 0 0 0 0 0
38668- 0 0 0 0 0 0 0 0 0 0 0 0
38669- 0 0 0 0 0 0 0 0 0 14 14 14
38670- 42 42 42 86 86 86 6 6 6 116 116 116
38671-106 106 106 6 6 6 70 70 70 149 149 149
38672-128 128 128 18 18 18 38 38 38 54 54 54
38673-221 221 221 106 106 106 2 2 6 14 14 14
38674- 46 46 46 190 190 190 198 198 198 2 2 6
38675- 2 2 6 2 2 6 2 2 6 2 2 6
38676- 74 74 74 62 62 62 22 22 22 6 6 6
38677- 0 0 0 0 0 0 0 0 0 0 0 0
38678- 0 0 0 0 0 0 0 0 0 0 0 0
38679- 0 0 0 0 0 0 0 0 0 0 0 0
38680- 0 0 0 0 0 0 0 0 0 0 0 0
38681- 0 0 0 0 0 0 0 0 0 0 0 0
38682- 0 0 0 0 0 0 0 0 0 0 0 0
38683- 0 0 0 0 0 0 0 0 1 0 0 0
38684- 0 0 1 0 0 0 0 0 1 0 0 0
38685- 0 0 0 0 0 0 0 0 0 0 0 0
38686- 0 0 0 0 0 0 0 0 0 0 0 0
38687- 0 0 0 0 0 0 0 0 0 0 0 0
38688- 0 0 0 0 0 0 0 0 0 0 0 0
38689- 0 0 0 0 0 0 0 0 0 14 14 14
38690- 42 42 42 94 94 94 14 14 14 101 101 101
38691-128 128 128 2 2 6 18 18 18 116 116 116
38692-118 98 46 121 92 8 121 92 8 98 78 10
38693-162 162 162 106 106 106 2 2 6 2 2 6
38694- 2 2 6 195 195 195 195 195 195 6 6 6
38695- 2 2 6 2 2 6 2 2 6 2 2 6
38696- 74 74 74 62 62 62 22 22 22 6 6 6
38697- 0 0 0 0 0 0 0 0 0 0 0 0
38698- 0 0 0 0 0 0 0 0 0 0 0 0
38699- 0 0 0 0 0 0 0 0 0 0 0 0
38700- 0 0 0 0 0 0 0 0 0 0 0 0
38701- 0 0 0 0 0 0 0 0 0 0 0 0
38702- 0 0 0 0 0 0 0 0 0 0 0 0
38703- 0 0 0 0 0 0 0 0 1 0 0 1
38704- 0 0 1 0 0 0 0 0 1 0 0 0
38705- 0 0 0 0 0 0 0 0 0 0 0 0
38706- 0 0 0 0 0 0 0 0 0 0 0 0
38707- 0 0 0 0 0 0 0 0 0 0 0 0
38708- 0 0 0 0 0 0 0 0 0 0 0 0
38709- 0 0 0 0 0 0 0 0 0 10 10 10
38710- 38 38 38 90 90 90 14 14 14 58 58 58
38711-210 210 210 26 26 26 54 38 6 154 114 10
38712-226 170 11 236 186 11 225 175 15 184 144 12
38713-215 174 15 175 146 61 37 26 9 2 2 6
38714- 70 70 70 246 246 246 138 138 138 2 2 6
38715- 2 2 6 2 2 6 2 2 6 2 2 6
38716- 70 70 70 66 66 66 26 26 26 6 6 6
38717- 0 0 0 0 0 0 0 0 0 0 0 0
38718- 0 0 0 0 0 0 0 0 0 0 0 0
38719- 0 0 0 0 0 0 0 0 0 0 0 0
38720- 0 0 0 0 0 0 0 0 0 0 0 0
38721- 0 0 0 0 0 0 0 0 0 0 0 0
38722- 0 0 0 0 0 0 0 0 0 0 0 0
38723- 0 0 0 0 0 0 0 0 0 0 0 0
38724- 0 0 0 0 0 0 0 0 0 0 0 0
38725- 0 0 0 0 0 0 0 0 0 0 0 0
38726- 0 0 0 0 0 0 0 0 0 0 0 0
38727- 0 0 0 0 0 0 0 0 0 0 0 0
38728- 0 0 0 0 0 0 0 0 0 0 0 0
38729- 0 0 0 0 0 0 0 0 0 10 10 10
38730- 38 38 38 86 86 86 14 14 14 10 10 10
38731-195 195 195 188 164 115 192 133 9 225 175 15
38732-239 182 13 234 190 10 232 195 16 232 200 30
38733-245 207 45 241 208 19 232 195 16 184 144 12
38734-218 194 134 211 206 186 42 42 42 2 2 6
38735- 2 2 6 2 2 6 2 2 6 2 2 6
38736- 50 50 50 74 74 74 30 30 30 6 6 6
38737- 0 0 0 0 0 0 0 0 0 0 0 0
38738- 0 0 0 0 0 0 0 0 0 0 0 0
38739- 0 0 0 0 0 0 0 0 0 0 0 0
38740- 0 0 0 0 0 0 0 0 0 0 0 0
38741- 0 0 0 0 0 0 0 0 0 0 0 0
38742- 0 0 0 0 0 0 0 0 0 0 0 0
38743- 0 0 0 0 0 0 0 0 0 0 0 0
38744- 0 0 0 0 0 0 0 0 0 0 0 0
38745- 0 0 0 0 0 0 0 0 0 0 0 0
38746- 0 0 0 0 0 0 0 0 0 0 0 0
38747- 0 0 0 0 0 0 0 0 0 0 0 0
38748- 0 0 0 0 0 0 0 0 0 0 0 0
38749- 0 0 0 0 0 0 0 0 0 10 10 10
38750- 34 34 34 86 86 86 14 14 14 2 2 6
38751-121 87 25 192 133 9 219 162 10 239 182 13
38752-236 186 11 232 195 16 241 208 19 244 214 54
38753-246 218 60 246 218 38 246 215 20 241 208 19
38754-241 208 19 226 184 13 121 87 25 2 2 6
38755- 2 2 6 2 2 6 2 2 6 2 2 6
38756- 50 50 50 82 82 82 34 34 34 10 10 10
38757- 0 0 0 0 0 0 0 0 0 0 0 0
38758- 0 0 0 0 0 0 0 0 0 0 0 0
38759- 0 0 0 0 0 0 0 0 0 0 0 0
38760- 0 0 0 0 0 0 0 0 0 0 0 0
38761- 0 0 0 0 0 0 0 0 0 0 0 0
38762- 0 0 0 0 0 0 0 0 0 0 0 0
38763- 0 0 0 0 0 0 0 0 0 0 0 0
38764- 0 0 0 0 0 0 0 0 0 0 0 0
38765- 0 0 0 0 0 0 0 0 0 0 0 0
38766- 0 0 0 0 0 0 0 0 0 0 0 0
38767- 0 0 0 0 0 0 0 0 0 0 0 0
38768- 0 0 0 0 0 0 0 0 0 0 0 0
38769- 0 0 0 0 0 0 0 0 0 10 10 10
38770- 34 34 34 82 82 82 30 30 30 61 42 6
38771-180 123 7 206 145 10 230 174 11 239 182 13
38772-234 190 10 238 202 15 241 208 19 246 218 74
38773-246 218 38 246 215 20 246 215 20 246 215 20
38774-226 184 13 215 174 15 184 144 12 6 6 6
38775- 2 2 6 2 2 6 2 2 6 2 2 6
38776- 26 26 26 94 94 94 42 42 42 14 14 14
38777- 0 0 0 0 0 0 0 0 0 0 0 0
38778- 0 0 0 0 0 0 0 0 0 0 0 0
38779- 0 0 0 0 0 0 0 0 0 0 0 0
38780- 0 0 0 0 0 0 0 0 0 0 0 0
38781- 0 0 0 0 0 0 0 0 0 0 0 0
38782- 0 0 0 0 0 0 0 0 0 0 0 0
38783- 0 0 0 0 0 0 0 0 0 0 0 0
38784- 0 0 0 0 0 0 0 0 0 0 0 0
38785- 0 0 0 0 0 0 0 0 0 0 0 0
38786- 0 0 0 0 0 0 0 0 0 0 0 0
38787- 0 0 0 0 0 0 0 0 0 0 0 0
38788- 0 0 0 0 0 0 0 0 0 0 0 0
38789- 0 0 0 0 0 0 0 0 0 10 10 10
38790- 30 30 30 78 78 78 50 50 50 104 69 6
38791-192 133 9 216 158 10 236 178 12 236 186 11
38792-232 195 16 241 208 19 244 214 54 245 215 43
38793-246 215 20 246 215 20 241 208 19 198 155 10
38794-200 144 11 216 158 10 156 118 10 2 2 6
38795- 2 2 6 2 2 6 2 2 6 2 2 6
38796- 6 6 6 90 90 90 54 54 54 18 18 18
38797- 6 6 6 0 0 0 0 0 0 0 0 0
38798- 0 0 0 0 0 0 0 0 0 0 0 0
38799- 0 0 0 0 0 0 0 0 0 0 0 0
38800- 0 0 0 0 0 0 0 0 0 0 0 0
38801- 0 0 0 0 0 0 0 0 0 0 0 0
38802- 0 0 0 0 0 0 0 0 0 0 0 0
38803- 0 0 0 0 0 0 0 0 0 0 0 0
38804- 0 0 0 0 0 0 0 0 0 0 0 0
38805- 0 0 0 0 0 0 0 0 0 0 0 0
38806- 0 0 0 0 0 0 0 0 0 0 0 0
38807- 0 0 0 0 0 0 0 0 0 0 0 0
38808- 0 0 0 0 0 0 0 0 0 0 0 0
38809- 0 0 0 0 0 0 0 0 0 10 10 10
38810- 30 30 30 78 78 78 46 46 46 22 22 22
38811-137 92 6 210 162 10 239 182 13 238 190 10
38812-238 202 15 241 208 19 246 215 20 246 215 20
38813-241 208 19 203 166 17 185 133 11 210 150 10
38814-216 158 10 210 150 10 102 78 10 2 2 6
38815- 6 6 6 54 54 54 14 14 14 2 2 6
38816- 2 2 6 62 62 62 74 74 74 30 30 30
38817- 10 10 10 0 0 0 0 0 0 0 0 0
38818- 0 0 0 0 0 0 0 0 0 0 0 0
38819- 0 0 0 0 0 0 0 0 0 0 0 0
38820- 0 0 0 0 0 0 0 0 0 0 0 0
38821- 0 0 0 0 0 0 0 0 0 0 0 0
38822- 0 0 0 0 0 0 0 0 0 0 0 0
38823- 0 0 0 0 0 0 0 0 0 0 0 0
38824- 0 0 0 0 0 0 0 0 0 0 0 0
38825- 0 0 0 0 0 0 0 0 0 0 0 0
38826- 0 0 0 0 0 0 0 0 0 0 0 0
38827- 0 0 0 0 0 0 0 0 0 0 0 0
38828- 0 0 0 0 0 0 0 0 0 0 0 0
38829- 0 0 0 0 0 0 0 0 0 10 10 10
38830- 34 34 34 78 78 78 50 50 50 6 6 6
38831- 94 70 30 139 102 15 190 146 13 226 184 13
38832-232 200 30 232 195 16 215 174 15 190 146 13
38833-168 122 10 192 133 9 210 150 10 213 154 11
38834-202 150 34 182 157 106 101 98 89 2 2 6
38835- 2 2 6 78 78 78 116 116 116 58 58 58
38836- 2 2 6 22 22 22 90 90 90 46 46 46
38837- 18 18 18 6 6 6 0 0 0 0 0 0
38838- 0 0 0 0 0 0 0 0 0 0 0 0
38839- 0 0 0 0 0 0 0 0 0 0 0 0
38840- 0 0 0 0 0 0 0 0 0 0 0 0
38841- 0 0 0 0 0 0 0 0 0 0 0 0
38842- 0 0 0 0 0 0 0 0 0 0 0 0
38843- 0 0 0 0 0 0 0 0 0 0 0 0
38844- 0 0 0 0 0 0 0 0 0 0 0 0
38845- 0 0 0 0 0 0 0 0 0 0 0 0
38846- 0 0 0 0 0 0 0 0 0 0 0 0
38847- 0 0 0 0 0 0 0 0 0 0 0 0
38848- 0 0 0 0 0 0 0 0 0 0 0 0
38849- 0 0 0 0 0 0 0 0 0 10 10 10
38850- 38 38 38 86 86 86 50 50 50 6 6 6
38851-128 128 128 174 154 114 156 107 11 168 122 10
38852-198 155 10 184 144 12 197 138 11 200 144 11
38853-206 145 10 206 145 10 197 138 11 188 164 115
38854-195 195 195 198 198 198 174 174 174 14 14 14
38855- 2 2 6 22 22 22 116 116 116 116 116 116
38856- 22 22 22 2 2 6 74 74 74 70 70 70
38857- 30 30 30 10 10 10 0 0 0 0 0 0
38858- 0 0 0 0 0 0 0 0 0 0 0 0
38859- 0 0 0 0 0 0 0 0 0 0 0 0
38860- 0 0 0 0 0 0 0 0 0 0 0 0
38861- 0 0 0 0 0 0 0 0 0 0 0 0
38862- 0 0 0 0 0 0 0 0 0 0 0 0
38863- 0 0 0 0 0 0 0 0 0 0 0 0
38864- 0 0 0 0 0 0 0 0 0 0 0 0
38865- 0 0 0 0 0 0 0 0 0 0 0 0
38866- 0 0 0 0 0 0 0 0 0 0 0 0
38867- 0 0 0 0 0 0 0 0 0 0 0 0
38868- 0 0 0 0 0 0 0 0 0 0 0 0
38869- 0 0 0 0 0 0 6 6 6 18 18 18
38870- 50 50 50 101 101 101 26 26 26 10 10 10
38871-138 138 138 190 190 190 174 154 114 156 107 11
38872-197 138 11 200 144 11 197 138 11 192 133 9
38873-180 123 7 190 142 34 190 178 144 187 187 187
38874-202 202 202 221 221 221 214 214 214 66 66 66
38875- 2 2 6 2 2 6 50 50 50 62 62 62
38876- 6 6 6 2 2 6 10 10 10 90 90 90
38877- 50 50 50 18 18 18 6 6 6 0 0 0
38878- 0 0 0 0 0 0 0 0 0 0 0 0
38879- 0 0 0 0 0 0 0 0 0 0 0 0
38880- 0 0 0 0 0 0 0 0 0 0 0 0
38881- 0 0 0 0 0 0 0 0 0 0 0 0
38882- 0 0 0 0 0 0 0 0 0 0 0 0
38883- 0 0 0 0 0 0 0 0 0 0 0 0
38884- 0 0 0 0 0 0 0 0 0 0 0 0
38885- 0 0 0 0 0 0 0 0 0 0 0 0
38886- 0 0 0 0 0 0 0 0 0 0 0 0
38887- 0 0 0 0 0 0 0 0 0 0 0 0
38888- 0 0 0 0 0 0 0 0 0 0 0 0
38889- 0 0 0 0 0 0 10 10 10 34 34 34
38890- 74 74 74 74 74 74 2 2 6 6 6 6
38891-144 144 144 198 198 198 190 190 190 178 166 146
38892-154 121 60 156 107 11 156 107 11 168 124 44
38893-174 154 114 187 187 187 190 190 190 210 210 210
38894-246 246 246 253 253 253 253 253 253 182 182 182
38895- 6 6 6 2 2 6 2 2 6 2 2 6
38896- 2 2 6 2 2 6 2 2 6 62 62 62
38897- 74 74 74 34 34 34 14 14 14 0 0 0
38898- 0 0 0 0 0 0 0 0 0 0 0 0
38899- 0 0 0 0 0 0 0 0 0 0 0 0
38900- 0 0 0 0 0 0 0 0 0 0 0 0
38901- 0 0 0 0 0 0 0 0 0 0 0 0
38902- 0 0 0 0 0 0 0 0 0 0 0 0
38903- 0 0 0 0 0 0 0 0 0 0 0 0
38904- 0 0 0 0 0 0 0 0 0 0 0 0
38905- 0 0 0 0 0 0 0 0 0 0 0 0
38906- 0 0 0 0 0 0 0 0 0 0 0 0
38907- 0 0 0 0 0 0 0 0 0 0 0 0
38908- 0 0 0 0 0 0 0 0 0 0 0 0
38909- 0 0 0 10 10 10 22 22 22 54 54 54
38910- 94 94 94 18 18 18 2 2 6 46 46 46
38911-234 234 234 221 221 221 190 190 190 190 190 190
38912-190 190 190 187 187 187 187 187 187 190 190 190
38913-190 190 190 195 195 195 214 214 214 242 242 242
38914-253 253 253 253 253 253 253 253 253 253 253 253
38915- 82 82 82 2 2 6 2 2 6 2 2 6
38916- 2 2 6 2 2 6 2 2 6 14 14 14
38917- 86 86 86 54 54 54 22 22 22 6 6 6
38918- 0 0 0 0 0 0 0 0 0 0 0 0
38919- 0 0 0 0 0 0 0 0 0 0 0 0
38920- 0 0 0 0 0 0 0 0 0 0 0 0
38921- 0 0 0 0 0 0 0 0 0 0 0 0
38922- 0 0 0 0 0 0 0 0 0 0 0 0
38923- 0 0 0 0 0 0 0 0 0 0 0 0
38924- 0 0 0 0 0 0 0 0 0 0 0 0
38925- 0 0 0 0 0 0 0 0 0 0 0 0
38926- 0 0 0 0 0 0 0 0 0 0 0 0
38927- 0 0 0 0 0 0 0 0 0 0 0 0
38928- 0 0 0 0 0 0 0 0 0 0 0 0
38929- 6 6 6 18 18 18 46 46 46 90 90 90
38930- 46 46 46 18 18 18 6 6 6 182 182 182
38931-253 253 253 246 246 246 206 206 206 190 190 190
38932-190 190 190 190 190 190 190 190 190 190 190 190
38933-206 206 206 231 231 231 250 250 250 253 253 253
38934-253 253 253 253 253 253 253 253 253 253 253 253
38935-202 202 202 14 14 14 2 2 6 2 2 6
38936- 2 2 6 2 2 6 2 2 6 2 2 6
38937- 42 42 42 86 86 86 42 42 42 18 18 18
38938- 6 6 6 0 0 0 0 0 0 0 0 0
38939- 0 0 0 0 0 0 0 0 0 0 0 0
38940- 0 0 0 0 0 0 0 0 0 0 0 0
38941- 0 0 0 0 0 0 0 0 0 0 0 0
38942- 0 0 0 0 0 0 0 0 0 0 0 0
38943- 0 0 0 0 0 0 0 0 0 0 0 0
38944- 0 0 0 0 0 0 0 0 0 0 0 0
38945- 0 0 0 0 0 0 0 0 0 0 0 0
38946- 0 0 0 0 0 0 0 0 0 0 0 0
38947- 0 0 0 0 0 0 0 0 0 0 0 0
38948- 0 0 0 0 0 0 0 0 0 6 6 6
38949- 14 14 14 38 38 38 74 74 74 66 66 66
38950- 2 2 6 6 6 6 90 90 90 250 250 250
38951-253 253 253 253 253 253 238 238 238 198 198 198
38952-190 190 190 190 190 190 195 195 195 221 221 221
38953-246 246 246 253 253 253 253 253 253 253 253 253
38954-253 253 253 253 253 253 253 253 253 253 253 253
38955-253 253 253 82 82 82 2 2 6 2 2 6
38956- 2 2 6 2 2 6 2 2 6 2 2 6
38957- 2 2 6 78 78 78 70 70 70 34 34 34
38958- 14 14 14 6 6 6 0 0 0 0 0 0
38959- 0 0 0 0 0 0 0 0 0 0 0 0
38960- 0 0 0 0 0 0 0 0 0 0 0 0
38961- 0 0 0 0 0 0 0 0 0 0 0 0
38962- 0 0 0 0 0 0 0 0 0 0 0 0
38963- 0 0 0 0 0 0 0 0 0 0 0 0
38964- 0 0 0 0 0 0 0 0 0 0 0 0
38965- 0 0 0 0 0 0 0 0 0 0 0 0
38966- 0 0 0 0 0 0 0 0 0 0 0 0
38967- 0 0 0 0 0 0 0 0 0 0 0 0
38968- 0 0 0 0 0 0 0 0 0 14 14 14
38969- 34 34 34 66 66 66 78 78 78 6 6 6
38970- 2 2 6 18 18 18 218 218 218 253 253 253
38971-253 253 253 253 253 253 253 253 253 246 246 246
38972-226 226 226 231 231 231 246 246 246 253 253 253
38973-253 253 253 253 253 253 253 253 253 253 253 253
38974-253 253 253 253 253 253 253 253 253 253 253 253
38975-253 253 253 178 178 178 2 2 6 2 2 6
38976- 2 2 6 2 2 6 2 2 6 2 2 6
38977- 2 2 6 18 18 18 90 90 90 62 62 62
38978- 30 30 30 10 10 10 0 0 0 0 0 0
38979- 0 0 0 0 0 0 0 0 0 0 0 0
38980- 0 0 0 0 0 0 0 0 0 0 0 0
38981- 0 0 0 0 0 0 0 0 0 0 0 0
38982- 0 0 0 0 0 0 0 0 0 0 0 0
38983- 0 0 0 0 0 0 0 0 0 0 0 0
38984- 0 0 0 0 0 0 0 0 0 0 0 0
38985- 0 0 0 0 0 0 0 0 0 0 0 0
38986- 0 0 0 0 0 0 0 0 0 0 0 0
38987- 0 0 0 0 0 0 0 0 0 0 0 0
38988- 0 0 0 0 0 0 10 10 10 26 26 26
38989- 58 58 58 90 90 90 18 18 18 2 2 6
38990- 2 2 6 110 110 110 253 253 253 253 253 253
38991-253 253 253 253 253 253 253 253 253 253 253 253
38992-250 250 250 253 253 253 253 253 253 253 253 253
38993-253 253 253 253 253 253 253 253 253 253 253 253
38994-253 253 253 253 253 253 253 253 253 253 253 253
38995-253 253 253 231 231 231 18 18 18 2 2 6
38996- 2 2 6 2 2 6 2 2 6 2 2 6
38997- 2 2 6 2 2 6 18 18 18 94 94 94
38998- 54 54 54 26 26 26 10 10 10 0 0 0
38999- 0 0 0 0 0 0 0 0 0 0 0 0
39000- 0 0 0 0 0 0 0 0 0 0 0 0
39001- 0 0 0 0 0 0 0 0 0 0 0 0
39002- 0 0 0 0 0 0 0 0 0 0 0 0
39003- 0 0 0 0 0 0 0 0 0 0 0 0
39004- 0 0 0 0 0 0 0 0 0 0 0 0
39005- 0 0 0 0 0 0 0 0 0 0 0 0
39006- 0 0 0 0 0 0 0 0 0 0 0 0
39007- 0 0 0 0 0 0 0 0 0 0 0 0
39008- 0 0 0 6 6 6 22 22 22 50 50 50
39009- 90 90 90 26 26 26 2 2 6 2 2 6
39010- 14 14 14 195 195 195 250 250 250 253 253 253
39011-253 253 253 253 253 253 253 253 253 253 253 253
39012-253 253 253 253 253 253 253 253 253 253 253 253
39013-253 253 253 253 253 253 253 253 253 253 253 253
39014-253 253 253 253 253 253 253 253 253 253 253 253
39015-250 250 250 242 242 242 54 54 54 2 2 6
39016- 2 2 6 2 2 6 2 2 6 2 2 6
39017- 2 2 6 2 2 6 2 2 6 38 38 38
39018- 86 86 86 50 50 50 22 22 22 6 6 6
39019- 0 0 0 0 0 0 0 0 0 0 0 0
39020- 0 0 0 0 0 0 0 0 0 0 0 0
39021- 0 0 0 0 0 0 0 0 0 0 0 0
39022- 0 0 0 0 0 0 0 0 0 0 0 0
39023- 0 0 0 0 0 0 0 0 0 0 0 0
39024- 0 0 0 0 0 0 0 0 0 0 0 0
39025- 0 0 0 0 0 0 0 0 0 0 0 0
39026- 0 0 0 0 0 0 0 0 0 0 0 0
39027- 0 0 0 0 0 0 0 0 0 0 0 0
39028- 6 6 6 14 14 14 38 38 38 82 82 82
39029- 34 34 34 2 2 6 2 2 6 2 2 6
39030- 42 42 42 195 195 195 246 246 246 253 253 253
39031-253 253 253 253 253 253 253 253 253 250 250 250
39032-242 242 242 242 242 242 250 250 250 253 253 253
39033-253 253 253 253 253 253 253 253 253 253 253 253
39034-253 253 253 250 250 250 246 246 246 238 238 238
39035-226 226 226 231 231 231 101 101 101 6 6 6
39036- 2 2 6 2 2 6 2 2 6 2 2 6
39037- 2 2 6 2 2 6 2 2 6 2 2 6
39038- 38 38 38 82 82 82 42 42 42 14 14 14
39039- 6 6 6 0 0 0 0 0 0 0 0 0
39040- 0 0 0 0 0 0 0 0 0 0 0 0
39041- 0 0 0 0 0 0 0 0 0 0 0 0
39042- 0 0 0 0 0 0 0 0 0 0 0 0
39043- 0 0 0 0 0 0 0 0 0 0 0 0
39044- 0 0 0 0 0 0 0 0 0 0 0 0
39045- 0 0 0 0 0 0 0 0 0 0 0 0
39046- 0 0 0 0 0 0 0 0 0 0 0 0
39047- 0 0 0 0 0 0 0 0 0 0 0 0
39048- 10 10 10 26 26 26 62 62 62 66 66 66
39049- 2 2 6 2 2 6 2 2 6 6 6 6
39050- 70 70 70 170 170 170 206 206 206 234 234 234
39051-246 246 246 250 250 250 250 250 250 238 238 238
39052-226 226 226 231 231 231 238 238 238 250 250 250
39053-250 250 250 250 250 250 246 246 246 231 231 231
39054-214 214 214 206 206 206 202 202 202 202 202 202
39055-198 198 198 202 202 202 182 182 182 18 18 18
39056- 2 2 6 2 2 6 2 2 6 2 2 6
39057- 2 2 6 2 2 6 2 2 6 2 2 6
39058- 2 2 6 62 62 62 66 66 66 30 30 30
39059- 10 10 10 0 0 0 0 0 0 0 0 0
39060- 0 0 0 0 0 0 0 0 0 0 0 0
39061- 0 0 0 0 0 0 0 0 0 0 0 0
39062- 0 0 0 0 0 0 0 0 0 0 0 0
39063- 0 0 0 0 0 0 0 0 0 0 0 0
39064- 0 0 0 0 0 0 0 0 0 0 0 0
39065- 0 0 0 0 0 0 0 0 0 0 0 0
39066- 0 0 0 0 0 0 0 0 0 0 0 0
39067- 0 0 0 0 0 0 0 0 0 0 0 0
39068- 14 14 14 42 42 42 82 82 82 18 18 18
39069- 2 2 6 2 2 6 2 2 6 10 10 10
39070- 94 94 94 182 182 182 218 218 218 242 242 242
39071-250 250 250 253 253 253 253 253 253 250 250 250
39072-234 234 234 253 253 253 253 253 253 253 253 253
39073-253 253 253 253 253 253 253 253 253 246 246 246
39074-238 238 238 226 226 226 210 210 210 202 202 202
39075-195 195 195 195 195 195 210 210 210 158 158 158
39076- 6 6 6 14 14 14 50 50 50 14 14 14
39077- 2 2 6 2 2 6 2 2 6 2 2 6
39078- 2 2 6 6 6 6 86 86 86 46 46 46
39079- 18 18 18 6 6 6 0 0 0 0 0 0
39080- 0 0 0 0 0 0 0 0 0 0 0 0
39081- 0 0 0 0 0 0 0 0 0 0 0 0
39082- 0 0 0 0 0 0 0 0 0 0 0 0
39083- 0 0 0 0 0 0 0 0 0 0 0 0
39084- 0 0 0 0 0 0 0 0 0 0 0 0
39085- 0 0 0 0 0 0 0 0 0 0 0 0
39086- 0 0 0 0 0 0 0 0 0 0 0 0
39087- 0 0 0 0 0 0 0 0 0 6 6 6
39088- 22 22 22 54 54 54 70 70 70 2 2 6
39089- 2 2 6 10 10 10 2 2 6 22 22 22
39090-166 166 166 231 231 231 250 250 250 253 253 253
39091-253 253 253 253 253 253 253 253 253 250 250 250
39092-242 242 242 253 253 253 253 253 253 253 253 253
39093-253 253 253 253 253 253 253 253 253 253 253 253
39094-253 253 253 253 253 253 253 253 253 246 246 246
39095-231 231 231 206 206 206 198 198 198 226 226 226
39096- 94 94 94 2 2 6 6 6 6 38 38 38
39097- 30 30 30 2 2 6 2 2 6 2 2 6
39098- 2 2 6 2 2 6 62 62 62 66 66 66
39099- 26 26 26 10 10 10 0 0 0 0 0 0
39100- 0 0 0 0 0 0 0 0 0 0 0 0
39101- 0 0 0 0 0 0 0 0 0 0 0 0
39102- 0 0 0 0 0 0 0 0 0 0 0 0
39103- 0 0 0 0 0 0 0 0 0 0 0 0
39104- 0 0 0 0 0 0 0 0 0 0 0 0
39105- 0 0 0 0 0 0 0 0 0 0 0 0
39106- 0 0 0 0 0 0 0 0 0 0 0 0
39107- 0 0 0 0 0 0 0 0 0 10 10 10
39108- 30 30 30 74 74 74 50 50 50 2 2 6
39109- 26 26 26 26 26 26 2 2 6 106 106 106
39110-238 238 238 253 253 253 253 253 253 253 253 253
39111-253 253 253 253 253 253 253 253 253 253 253 253
39112-253 253 253 253 253 253 253 253 253 253 253 253
39113-253 253 253 253 253 253 253 253 253 253 253 253
39114-253 253 253 253 253 253 253 253 253 253 253 253
39115-253 253 253 246 246 246 218 218 218 202 202 202
39116-210 210 210 14 14 14 2 2 6 2 2 6
39117- 30 30 30 22 22 22 2 2 6 2 2 6
39118- 2 2 6 2 2 6 18 18 18 86 86 86
39119- 42 42 42 14 14 14 0 0 0 0 0 0
39120- 0 0 0 0 0 0 0 0 0 0 0 0
39121- 0 0 0 0 0 0 0 0 0 0 0 0
39122- 0 0 0 0 0 0 0 0 0 0 0 0
39123- 0 0 0 0 0 0 0 0 0 0 0 0
39124- 0 0 0 0 0 0 0 0 0 0 0 0
39125- 0 0 0 0 0 0 0 0 0 0 0 0
39126- 0 0 0 0 0 0 0 0 0 0 0 0
39127- 0 0 0 0 0 0 0 0 0 14 14 14
39128- 42 42 42 90 90 90 22 22 22 2 2 6
39129- 42 42 42 2 2 6 18 18 18 218 218 218
39130-253 253 253 253 253 253 253 253 253 253 253 253
39131-253 253 253 253 253 253 253 253 253 253 253 253
39132-253 253 253 253 253 253 253 253 253 253 253 253
39133-253 253 253 253 253 253 253 253 253 253 253 253
39134-253 253 253 253 253 253 253 253 253 253 253 253
39135-253 253 253 253 253 253 250 250 250 221 221 221
39136-218 218 218 101 101 101 2 2 6 14 14 14
39137- 18 18 18 38 38 38 10 10 10 2 2 6
39138- 2 2 6 2 2 6 2 2 6 78 78 78
39139- 58 58 58 22 22 22 6 6 6 0 0 0
39140- 0 0 0 0 0 0 0 0 0 0 0 0
39141- 0 0 0 0 0 0 0 0 0 0 0 0
39142- 0 0 0 0 0 0 0 0 0 0 0 0
39143- 0 0 0 0 0 0 0 0 0 0 0 0
39144- 0 0 0 0 0 0 0 0 0 0 0 0
39145- 0 0 0 0 0 0 0 0 0 0 0 0
39146- 0 0 0 0 0 0 0 0 0 0 0 0
39147- 0 0 0 0 0 0 6 6 6 18 18 18
39148- 54 54 54 82 82 82 2 2 6 26 26 26
39149- 22 22 22 2 2 6 123 123 123 253 253 253
39150-253 253 253 253 253 253 253 253 253 253 253 253
39151-253 253 253 253 253 253 253 253 253 253 253 253
39152-253 253 253 253 253 253 253 253 253 253 253 253
39153-253 253 253 253 253 253 253 253 253 253 253 253
39154-253 253 253 253 253 253 253 253 253 253 253 253
39155-253 253 253 253 253 253 253 253 253 250 250 250
39156-238 238 238 198 198 198 6 6 6 38 38 38
39157- 58 58 58 26 26 26 38 38 38 2 2 6
39158- 2 2 6 2 2 6 2 2 6 46 46 46
39159- 78 78 78 30 30 30 10 10 10 0 0 0
39160- 0 0 0 0 0 0 0 0 0 0 0 0
39161- 0 0 0 0 0 0 0 0 0 0 0 0
39162- 0 0 0 0 0 0 0 0 0 0 0 0
39163- 0 0 0 0 0 0 0 0 0 0 0 0
39164- 0 0 0 0 0 0 0 0 0 0 0 0
39165- 0 0 0 0 0 0 0 0 0 0 0 0
39166- 0 0 0 0 0 0 0 0 0 0 0 0
39167- 0 0 0 0 0 0 10 10 10 30 30 30
39168- 74 74 74 58 58 58 2 2 6 42 42 42
39169- 2 2 6 22 22 22 231 231 231 253 253 253
39170-253 253 253 253 253 253 253 253 253 253 253 253
39171-253 253 253 253 253 253 253 253 253 250 250 250
39172-253 253 253 253 253 253 253 253 253 253 253 253
39173-253 253 253 253 253 253 253 253 253 253 253 253
39174-253 253 253 253 253 253 253 253 253 253 253 253
39175-253 253 253 253 253 253 253 253 253 253 253 253
39176-253 253 253 246 246 246 46 46 46 38 38 38
39177- 42 42 42 14 14 14 38 38 38 14 14 14
39178- 2 2 6 2 2 6 2 2 6 6 6 6
39179- 86 86 86 46 46 46 14 14 14 0 0 0
39180- 0 0 0 0 0 0 0 0 0 0 0 0
39181- 0 0 0 0 0 0 0 0 0 0 0 0
39182- 0 0 0 0 0 0 0 0 0 0 0 0
39183- 0 0 0 0 0 0 0 0 0 0 0 0
39184- 0 0 0 0 0 0 0 0 0 0 0 0
39185- 0 0 0 0 0 0 0 0 0 0 0 0
39186- 0 0 0 0 0 0 0 0 0 0 0 0
39187- 0 0 0 6 6 6 14 14 14 42 42 42
39188- 90 90 90 18 18 18 18 18 18 26 26 26
39189- 2 2 6 116 116 116 253 253 253 253 253 253
39190-253 253 253 253 253 253 253 253 253 253 253 253
39191-253 253 253 253 253 253 250 250 250 238 238 238
39192-253 253 253 253 253 253 253 253 253 253 253 253
39193-253 253 253 253 253 253 253 253 253 253 253 253
39194-253 253 253 253 253 253 253 253 253 253 253 253
39195-253 253 253 253 253 253 253 253 253 253 253 253
39196-253 253 253 253 253 253 94 94 94 6 6 6
39197- 2 2 6 2 2 6 10 10 10 34 34 34
39198- 2 2 6 2 2 6 2 2 6 2 2 6
39199- 74 74 74 58 58 58 22 22 22 6 6 6
39200- 0 0 0 0 0 0 0 0 0 0 0 0
39201- 0 0 0 0 0 0 0 0 0 0 0 0
39202- 0 0 0 0 0 0 0 0 0 0 0 0
39203- 0 0 0 0 0 0 0 0 0 0 0 0
39204- 0 0 0 0 0 0 0 0 0 0 0 0
39205- 0 0 0 0 0 0 0 0 0 0 0 0
39206- 0 0 0 0 0 0 0 0 0 0 0 0
39207- 0 0 0 10 10 10 26 26 26 66 66 66
39208- 82 82 82 2 2 6 38 38 38 6 6 6
39209- 14 14 14 210 210 210 253 253 253 253 253 253
39210-253 253 253 253 253 253 253 253 253 253 253 253
39211-253 253 253 253 253 253 246 246 246 242 242 242
39212-253 253 253 253 253 253 253 253 253 253 253 253
39213-253 253 253 253 253 253 253 253 253 253 253 253
39214-253 253 253 253 253 253 253 253 253 253 253 253
39215-253 253 253 253 253 253 253 253 253 253 253 253
39216-253 253 253 253 253 253 144 144 144 2 2 6
39217- 2 2 6 2 2 6 2 2 6 46 46 46
39218- 2 2 6 2 2 6 2 2 6 2 2 6
39219- 42 42 42 74 74 74 30 30 30 10 10 10
39220- 0 0 0 0 0 0 0 0 0 0 0 0
39221- 0 0 0 0 0 0 0 0 0 0 0 0
39222- 0 0 0 0 0 0 0 0 0 0 0 0
39223- 0 0 0 0 0 0 0 0 0 0 0 0
39224- 0 0 0 0 0 0 0 0 0 0 0 0
39225- 0 0 0 0 0 0 0 0 0 0 0 0
39226- 0 0 0 0 0 0 0 0 0 0 0 0
39227- 6 6 6 14 14 14 42 42 42 90 90 90
39228- 26 26 26 6 6 6 42 42 42 2 2 6
39229- 74 74 74 250 250 250 253 253 253 253 253 253
39230-253 253 253 253 253 253 253 253 253 253 253 253
39231-253 253 253 253 253 253 242 242 242 242 242 242
39232-253 253 253 253 253 253 253 253 253 253 253 253
39233-253 253 253 253 253 253 253 253 253 253 253 253
39234-253 253 253 253 253 253 253 253 253 253 253 253
39235-253 253 253 253 253 253 253 253 253 253 253 253
39236-253 253 253 253 253 253 182 182 182 2 2 6
39237- 2 2 6 2 2 6 2 2 6 46 46 46
39238- 2 2 6 2 2 6 2 2 6 2 2 6
39239- 10 10 10 86 86 86 38 38 38 10 10 10
39240- 0 0 0 0 0 0 0 0 0 0 0 0
39241- 0 0 0 0 0 0 0 0 0 0 0 0
39242- 0 0 0 0 0 0 0 0 0 0 0 0
39243- 0 0 0 0 0 0 0 0 0 0 0 0
39244- 0 0 0 0 0 0 0 0 0 0 0 0
39245- 0 0 0 0 0 0 0 0 0 0 0 0
39246- 0 0 0 0 0 0 0 0 0 0 0 0
39247- 10 10 10 26 26 26 66 66 66 82 82 82
39248- 2 2 6 22 22 22 18 18 18 2 2 6
39249-149 149 149 253 253 253 253 253 253 253 253 253
39250-253 253 253 253 253 253 253 253 253 253 253 253
39251-253 253 253 253 253 253 234 234 234 242 242 242
39252-253 253 253 253 253 253 253 253 253 253 253 253
39253-253 253 253 253 253 253 253 253 253 253 253 253
39254-253 253 253 253 253 253 253 253 253 253 253 253
39255-253 253 253 253 253 253 253 253 253 253 253 253
39256-253 253 253 253 253 253 206 206 206 2 2 6
39257- 2 2 6 2 2 6 2 2 6 38 38 38
39258- 2 2 6 2 2 6 2 2 6 2 2 6
39259- 6 6 6 86 86 86 46 46 46 14 14 14
39260- 0 0 0 0 0 0 0 0 0 0 0 0
39261- 0 0 0 0 0 0 0 0 0 0 0 0
39262- 0 0 0 0 0 0 0 0 0 0 0 0
39263- 0 0 0 0 0 0 0 0 0 0 0 0
39264- 0 0 0 0 0 0 0 0 0 0 0 0
39265- 0 0 0 0 0 0 0 0 0 0 0 0
39266- 0 0 0 0 0 0 0 0 0 6 6 6
39267- 18 18 18 46 46 46 86 86 86 18 18 18
39268- 2 2 6 34 34 34 10 10 10 6 6 6
39269-210 210 210 253 253 253 253 253 253 253 253 253
39270-253 253 253 253 253 253 253 253 253 253 253 253
39271-253 253 253 253 253 253 234 234 234 242 242 242
39272-253 253 253 253 253 253 253 253 253 253 253 253
39273-253 253 253 253 253 253 253 253 253 253 253 253
39274-253 253 253 253 253 253 253 253 253 253 253 253
39275-253 253 253 253 253 253 253 253 253 253 253 253
39276-253 253 253 253 253 253 221 221 221 6 6 6
39277- 2 2 6 2 2 6 6 6 6 30 30 30
39278- 2 2 6 2 2 6 2 2 6 2 2 6
39279- 2 2 6 82 82 82 54 54 54 18 18 18
39280- 6 6 6 0 0 0 0 0 0 0 0 0
39281- 0 0 0 0 0 0 0 0 0 0 0 0
39282- 0 0 0 0 0 0 0 0 0 0 0 0
39283- 0 0 0 0 0 0 0 0 0 0 0 0
39284- 0 0 0 0 0 0 0 0 0 0 0 0
39285- 0 0 0 0 0 0 0 0 0 0 0 0
39286- 0 0 0 0 0 0 0 0 0 10 10 10
39287- 26 26 26 66 66 66 62 62 62 2 2 6
39288- 2 2 6 38 38 38 10 10 10 26 26 26
39289-238 238 238 253 253 253 253 253 253 253 253 253
39290-253 253 253 253 253 253 253 253 253 253 253 253
39291-253 253 253 253 253 253 231 231 231 238 238 238
39292-253 253 253 253 253 253 253 253 253 253 253 253
39293-253 253 253 253 253 253 253 253 253 253 253 253
39294-253 253 253 253 253 253 253 253 253 253 253 253
39295-253 253 253 253 253 253 253 253 253 253 253 253
39296-253 253 253 253 253 253 231 231 231 6 6 6
39297- 2 2 6 2 2 6 10 10 10 30 30 30
39298- 2 2 6 2 2 6 2 2 6 2 2 6
39299- 2 2 6 66 66 66 58 58 58 22 22 22
39300- 6 6 6 0 0 0 0 0 0 0 0 0
39301- 0 0 0 0 0 0 0 0 0 0 0 0
39302- 0 0 0 0 0 0 0 0 0 0 0 0
39303- 0 0 0 0 0 0 0 0 0 0 0 0
39304- 0 0 0 0 0 0 0 0 0 0 0 0
39305- 0 0 0 0 0 0 0 0 0 0 0 0
39306- 0 0 0 0 0 0 0 0 0 10 10 10
39307- 38 38 38 78 78 78 6 6 6 2 2 6
39308- 2 2 6 46 46 46 14 14 14 42 42 42
39309-246 246 246 253 253 253 253 253 253 253 253 253
39310-253 253 253 253 253 253 253 253 253 253 253 253
39311-253 253 253 253 253 253 231 231 231 242 242 242
39312-253 253 253 253 253 253 253 253 253 253 253 253
39313-253 253 253 253 253 253 253 253 253 253 253 253
39314-253 253 253 253 253 253 253 253 253 253 253 253
39315-253 253 253 253 253 253 253 253 253 253 253 253
39316-253 253 253 253 253 253 234 234 234 10 10 10
39317- 2 2 6 2 2 6 22 22 22 14 14 14
39318- 2 2 6 2 2 6 2 2 6 2 2 6
39319- 2 2 6 66 66 66 62 62 62 22 22 22
39320- 6 6 6 0 0 0 0 0 0 0 0 0
39321- 0 0 0 0 0 0 0 0 0 0 0 0
39322- 0 0 0 0 0 0 0 0 0 0 0 0
39323- 0 0 0 0 0 0 0 0 0 0 0 0
39324- 0 0 0 0 0 0 0 0 0 0 0 0
39325- 0 0 0 0 0 0 0 0 0 0 0 0
39326- 0 0 0 0 0 0 6 6 6 18 18 18
39327- 50 50 50 74 74 74 2 2 6 2 2 6
39328- 14 14 14 70 70 70 34 34 34 62 62 62
39329-250 250 250 253 253 253 253 253 253 253 253 253
39330-253 253 253 253 253 253 253 253 253 253 253 253
39331-253 253 253 253 253 253 231 231 231 246 246 246
39332-253 253 253 253 253 253 253 253 253 253 253 253
39333-253 253 253 253 253 253 253 253 253 253 253 253
39334-253 253 253 253 253 253 253 253 253 253 253 253
39335-253 253 253 253 253 253 253 253 253 253 253 253
39336-253 253 253 253 253 253 234 234 234 14 14 14
39337- 2 2 6 2 2 6 30 30 30 2 2 6
39338- 2 2 6 2 2 6 2 2 6 2 2 6
39339- 2 2 6 66 66 66 62 62 62 22 22 22
39340- 6 6 6 0 0 0 0 0 0 0 0 0
39341- 0 0 0 0 0 0 0 0 0 0 0 0
39342- 0 0 0 0 0 0 0 0 0 0 0 0
39343- 0 0 0 0 0 0 0 0 0 0 0 0
39344- 0 0 0 0 0 0 0 0 0 0 0 0
39345- 0 0 0 0 0 0 0 0 0 0 0 0
39346- 0 0 0 0 0 0 6 6 6 18 18 18
39347- 54 54 54 62 62 62 2 2 6 2 2 6
39348- 2 2 6 30 30 30 46 46 46 70 70 70
39349-250 250 250 253 253 253 253 253 253 253 253 253
39350-253 253 253 253 253 253 253 253 253 253 253 253
39351-253 253 253 253 253 253 231 231 231 246 246 246
39352-253 253 253 253 253 253 253 253 253 253 253 253
39353-253 253 253 253 253 253 253 253 253 253 253 253
39354-253 253 253 253 253 253 253 253 253 253 253 253
39355-253 253 253 253 253 253 253 253 253 253 253 253
39356-253 253 253 253 253 253 226 226 226 10 10 10
39357- 2 2 6 6 6 6 30 30 30 2 2 6
39358- 2 2 6 2 2 6 2 2 6 2 2 6
39359- 2 2 6 66 66 66 58 58 58 22 22 22
39360- 6 6 6 0 0 0 0 0 0 0 0 0
39361- 0 0 0 0 0 0 0 0 0 0 0 0
39362- 0 0 0 0 0 0 0 0 0 0 0 0
39363- 0 0 0 0 0 0 0 0 0 0 0 0
39364- 0 0 0 0 0 0 0 0 0 0 0 0
39365- 0 0 0 0 0 0 0 0 0 0 0 0
39366- 0 0 0 0 0 0 6 6 6 22 22 22
39367- 58 58 58 62 62 62 2 2 6 2 2 6
39368- 2 2 6 2 2 6 30 30 30 78 78 78
39369-250 250 250 253 253 253 253 253 253 253 253 253
39370-253 253 253 253 253 253 253 253 253 253 253 253
39371-253 253 253 253 253 253 231 231 231 246 246 246
39372-253 253 253 253 253 253 253 253 253 253 253 253
39373-253 253 253 253 253 253 253 253 253 253 253 253
39374-253 253 253 253 253 253 253 253 253 253 253 253
39375-253 253 253 253 253 253 253 253 253 253 253 253
39376-253 253 253 253 253 253 206 206 206 2 2 6
39377- 22 22 22 34 34 34 18 14 6 22 22 22
39378- 26 26 26 18 18 18 6 6 6 2 2 6
39379- 2 2 6 82 82 82 54 54 54 18 18 18
39380- 6 6 6 0 0 0 0 0 0 0 0 0
39381- 0 0 0 0 0 0 0 0 0 0 0 0
39382- 0 0 0 0 0 0 0 0 0 0 0 0
39383- 0 0 0 0 0 0 0 0 0 0 0 0
39384- 0 0 0 0 0 0 0 0 0 0 0 0
39385- 0 0 0 0 0 0 0 0 0 0 0 0
39386- 0 0 0 0 0 0 6 6 6 26 26 26
39387- 62 62 62 106 106 106 74 54 14 185 133 11
39388-210 162 10 121 92 8 6 6 6 62 62 62
39389-238 238 238 253 253 253 253 253 253 253 253 253
39390-253 253 253 253 253 253 253 253 253 253 253 253
39391-253 253 253 253 253 253 231 231 231 246 246 246
39392-253 253 253 253 253 253 253 253 253 253 253 253
39393-253 253 253 253 253 253 253 253 253 253 253 253
39394-253 253 253 253 253 253 253 253 253 253 253 253
39395-253 253 253 253 253 253 253 253 253 253 253 253
39396-253 253 253 253 253 253 158 158 158 18 18 18
39397- 14 14 14 2 2 6 2 2 6 2 2 6
39398- 6 6 6 18 18 18 66 66 66 38 38 38
39399- 6 6 6 94 94 94 50 50 50 18 18 18
39400- 6 6 6 0 0 0 0 0 0 0 0 0
39401- 0 0 0 0 0 0 0 0 0 0 0 0
39402- 0 0 0 0 0 0 0 0 0 0 0 0
39403- 0 0 0 0 0 0 0 0 0 0 0 0
39404- 0 0 0 0 0 0 0 0 0 0 0 0
39405- 0 0 0 0 0 0 0 0 0 6 6 6
39406- 10 10 10 10 10 10 18 18 18 38 38 38
39407- 78 78 78 142 134 106 216 158 10 242 186 14
39408-246 190 14 246 190 14 156 118 10 10 10 10
39409- 90 90 90 238 238 238 253 253 253 253 253 253
39410-253 253 253 253 253 253 253 253 253 253 253 253
39411-253 253 253 253 253 253 231 231 231 250 250 250
39412-253 253 253 253 253 253 253 253 253 253 253 253
39413-253 253 253 253 253 253 253 253 253 253 253 253
39414-253 253 253 253 253 253 253 253 253 253 253 253
39415-253 253 253 253 253 253 253 253 253 246 230 190
39416-238 204 91 238 204 91 181 142 44 37 26 9
39417- 2 2 6 2 2 6 2 2 6 2 2 6
39418- 2 2 6 2 2 6 38 38 38 46 46 46
39419- 26 26 26 106 106 106 54 54 54 18 18 18
39420- 6 6 6 0 0 0 0 0 0 0 0 0
39421- 0 0 0 0 0 0 0 0 0 0 0 0
39422- 0 0 0 0 0 0 0 0 0 0 0 0
39423- 0 0 0 0 0 0 0 0 0 0 0 0
39424- 0 0 0 0 0 0 0 0 0 0 0 0
39425- 0 0 0 6 6 6 14 14 14 22 22 22
39426- 30 30 30 38 38 38 50 50 50 70 70 70
39427-106 106 106 190 142 34 226 170 11 242 186 14
39428-246 190 14 246 190 14 246 190 14 154 114 10
39429- 6 6 6 74 74 74 226 226 226 253 253 253
39430-253 253 253 253 253 253 253 253 253 253 253 253
39431-253 253 253 253 253 253 231 231 231 250 250 250
39432-253 253 253 253 253 253 253 253 253 253 253 253
39433-253 253 253 253 253 253 253 253 253 253 253 253
39434-253 253 253 253 253 253 253 253 253 253 253 253
39435-253 253 253 253 253 253 253 253 253 228 184 62
39436-241 196 14 241 208 19 232 195 16 38 30 10
39437- 2 2 6 2 2 6 2 2 6 2 2 6
39438- 2 2 6 6 6 6 30 30 30 26 26 26
39439-203 166 17 154 142 90 66 66 66 26 26 26
39440- 6 6 6 0 0 0 0 0 0 0 0 0
39441- 0 0 0 0 0 0 0 0 0 0 0 0
39442- 0 0 0 0 0 0 0 0 0 0 0 0
39443- 0 0 0 0 0 0 0 0 0 0 0 0
39444- 0 0 0 0 0 0 0 0 0 0 0 0
39445- 6 6 6 18 18 18 38 38 38 58 58 58
39446- 78 78 78 86 86 86 101 101 101 123 123 123
39447-175 146 61 210 150 10 234 174 13 246 186 14
39448-246 190 14 246 190 14 246 190 14 238 190 10
39449-102 78 10 2 2 6 46 46 46 198 198 198
39450-253 253 253 253 253 253 253 253 253 253 253 253
39451-253 253 253 253 253 253 234 234 234 242 242 242
39452-253 253 253 253 253 253 253 253 253 253 253 253
39453-253 253 253 253 253 253 253 253 253 253 253 253
39454-253 253 253 253 253 253 253 253 253 253 253 253
39455-253 253 253 253 253 253 253 253 253 224 178 62
39456-242 186 14 241 196 14 210 166 10 22 18 6
39457- 2 2 6 2 2 6 2 2 6 2 2 6
39458- 2 2 6 2 2 6 6 6 6 121 92 8
39459-238 202 15 232 195 16 82 82 82 34 34 34
39460- 10 10 10 0 0 0 0 0 0 0 0 0
39461- 0 0 0 0 0 0 0 0 0 0 0 0
39462- 0 0 0 0 0 0 0 0 0 0 0 0
39463- 0 0 0 0 0 0 0 0 0 0 0 0
39464- 0 0 0 0 0 0 0 0 0 0 0 0
39465- 14 14 14 38 38 38 70 70 70 154 122 46
39466-190 142 34 200 144 11 197 138 11 197 138 11
39467-213 154 11 226 170 11 242 186 14 246 190 14
39468-246 190 14 246 190 14 246 190 14 246 190 14
39469-225 175 15 46 32 6 2 2 6 22 22 22
39470-158 158 158 250 250 250 253 253 253 253 253 253
39471-253 253 253 253 253 253 253 253 253 253 253 253
39472-253 253 253 253 253 253 253 253 253 253 253 253
39473-253 253 253 253 253 253 253 253 253 253 253 253
39474-253 253 253 253 253 253 253 253 253 253 253 253
39475-253 253 253 250 250 250 242 242 242 224 178 62
39476-239 182 13 236 186 11 213 154 11 46 32 6
39477- 2 2 6 2 2 6 2 2 6 2 2 6
39478- 2 2 6 2 2 6 61 42 6 225 175 15
39479-238 190 10 236 186 11 112 100 78 42 42 42
39480- 14 14 14 0 0 0 0 0 0 0 0 0
39481- 0 0 0 0 0 0 0 0 0 0 0 0
39482- 0 0 0 0 0 0 0 0 0 0 0 0
39483- 0 0 0 0 0 0 0 0 0 0 0 0
39484- 0 0 0 0 0 0 0 0 0 6 6 6
39485- 22 22 22 54 54 54 154 122 46 213 154 11
39486-226 170 11 230 174 11 226 170 11 226 170 11
39487-236 178 12 242 186 14 246 190 14 246 190 14
39488-246 190 14 246 190 14 246 190 14 246 190 14
39489-241 196 14 184 144 12 10 10 10 2 2 6
39490- 6 6 6 116 116 116 242 242 242 253 253 253
39491-253 253 253 253 253 253 253 253 253 253 253 253
39492-253 253 253 253 253 253 253 253 253 253 253 253
39493-253 253 253 253 253 253 253 253 253 253 253 253
39494-253 253 253 253 253 253 253 253 253 253 253 253
39495-253 253 253 231 231 231 198 198 198 214 170 54
39496-236 178 12 236 178 12 210 150 10 137 92 6
39497- 18 14 6 2 2 6 2 2 6 2 2 6
39498- 6 6 6 70 47 6 200 144 11 236 178 12
39499-239 182 13 239 182 13 124 112 88 58 58 58
39500- 22 22 22 6 6 6 0 0 0 0 0 0
39501- 0 0 0 0 0 0 0 0 0 0 0 0
39502- 0 0 0 0 0 0 0 0 0 0 0 0
39503- 0 0 0 0 0 0 0 0 0 0 0 0
39504- 0 0 0 0 0 0 0 0 0 10 10 10
39505- 30 30 30 70 70 70 180 133 36 226 170 11
39506-239 182 13 242 186 14 242 186 14 246 186 14
39507-246 190 14 246 190 14 246 190 14 246 190 14
39508-246 190 14 246 190 14 246 190 14 246 190 14
39509-246 190 14 232 195 16 98 70 6 2 2 6
39510- 2 2 6 2 2 6 66 66 66 221 221 221
39511-253 253 253 253 253 253 253 253 253 253 253 253
39512-253 253 253 253 253 253 253 253 253 253 253 253
39513-253 253 253 253 253 253 253 253 253 253 253 253
39514-253 253 253 253 253 253 253 253 253 253 253 253
39515-253 253 253 206 206 206 198 198 198 214 166 58
39516-230 174 11 230 174 11 216 158 10 192 133 9
39517-163 110 8 116 81 8 102 78 10 116 81 8
39518-167 114 7 197 138 11 226 170 11 239 182 13
39519-242 186 14 242 186 14 162 146 94 78 78 78
39520- 34 34 34 14 14 14 6 6 6 0 0 0
39521- 0 0 0 0 0 0 0 0 0 0 0 0
39522- 0 0 0 0 0 0 0 0 0 0 0 0
39523- 0 0 0 0 0 0 0 0 0 0 0 0
39524- 0 0 0 0 0 0 0 0 0 6 6 6
39525- 30 30 30 78 78 78 190 142 34 226 170 11
39526-239 182 13 246 190 14 246 190 14 246 190 14
39527-246 190 14 246 190 14 246 190 14 246 190 14
39528-246 190 14 246 190 14 246 190 14 246 190 14
39529-246 190 14 241 196 14 203 166 17 22 18 6
39530- 2 2 6 2 2 6 2 2 6 38 38 38
39531-218 218 218 253 253 253 253 253 253 253 253 253
39532-253 253 253 253 253 253 253 253 253 253 253 253
39533-253 253 253 253 253 253 253 253 253 253 253 253
39534-253 253 253 253 253 253 253 253 253 253 253 253
39535-250 250 250 206 206 206 198 198 198 202 162 69
39536-226 170 11 236 178 12 224 166 10 210 150 10
39537-200 144 11 197 138 11 192 133 9 197 138 11
39538-210 150 10 226 170 11 242 186 14 246 190 14
39539-246 190 14 246 186 14 225 175 15 124 112 88
39540- 62 62 62 30 30 30 14 14 14 6 6 6
39541- 0 0 0 0 0 0 0 0 0 0 0 0
39542- 0 0 0 0 0 0 0 0 0 0 0 0
39543- 0 0 0 0 0 0 0 0 0 0 0 0
39544- 0 0 0 0 0 0 0 0 0 10 10 10
39545- 30 30 30 78 78 78 174 135 50 224 166 10
39546-239 182 13 246 190 14 246 190 14 246 190 14
39547-246 190 14 246 190 14 246 190 14 246 190 14
39548-246 190 14 246 190 14 246 190 14 246 190 14
39549-246 190 14 246 190 14 241 196 14 139 102 15
39550- 2 2 6 2 2 6 2 2 6 2 2 6
39551- 78 78 78 250 250 250 253 253 253 253 253 253
39552-253 253 253 253 253 253 253 253 253 253 253 253
39553-253 253 253 253 253 253 253 253 253 253 253 253
39554-253 253 253 253 253 253 253 253 253 253 253 253
39555-250 250 250 214 214 214 198 198 198 190 150 46
39556-219 162 10 236 178 12 234 174 13 224 166 10
39557-216 158 10 213 154 11 213 154 11 216 158 10
39558-226 170 11 239 182 13 246 190 14 246 190 14
39559-246 190 14 246 190 14 242 186 14 206 162 42
39560-101 101 101 58 58 58 30 30 30 14 14 14
39561- 6 6 6 0 0 0 0 0 0 0 0 0
39562- 0 0 0 0 0 0 0 0 0 0 0 0
39563- 0 0 0 0 0 0 0 0 0 0 0 0
39564- 0 0 0 0 0 0 0 0 0 10 10 10
39565- 30 30 30 74 74 74 174 135 50 216 158 10
39566-236 178 12 246 190 14 246 190 14 246 190 14
39567-246 190 14 246 190 14 246 190 14 246 190 14
39568-246 190 14 246 190 14 246 190 14 246 190 14
39569-246 190 14 246 190 14 241 196 14 226 184 13
39570- 61 42 6 2 2 6 2 2 6 2 2 6
39571- 22 22 22 238 238 238 253 253 253 253 253 253
39572-253 253 253 253 253 253 253 253 253 253 253 253
39573-253 253 253 253 253 253 253 253 253 253 253 253
39574-253 253 253 253 253 253 253 253 253 253 253 253
39575-253 253 253 226 226 226 187 187 187 180 133 36
39576-216 158 10 236 178 12 239 182 13 236 178 12
39577-230 174 11 226 170 11 226 170 11 230 174 11
39578-236 178 12 242 186 14 246 190 14 246 190 14
39579-246 190 14 246 190 14 246 186 14 239 182 13
39580-206 162 42 106 106 106 66 66 66 34 34 34
39581- 14 14 14 6 6 6 0 0 0 0 0 0
39582- 0 0 0 0 0 0 0 0 0 0 0 0
39583- 0 0 0 0 0 0 0 0 0 0 0 0
39584- 0 0 0 0 0 0 0 0 0 6 6 6
39585- 26 26 26 70 70 70 163 133 67 213 154 11
39586-236 178 12 246 190 14 246 190 14 246 190 14
39587-246 190 14 246 190 14 246 190 14 246 190 14
39588-246 190 14 246 190 14 246 190 14 246 190 14
39589-246 190 14 246 190 14 246 190 14 241 196 14
39590-190 146 13 18 14 6 2 2 6 2 2 6
39591- 46 46 46 246 246 246 253 253 253 253 253 253
39592-253 253 253 253 253 253 253 253 253 253 253 253
39593-253 253 253 253 253 253 253 253 253 253 253 253
39594-253 253 253 253 253 253 253 253 253 253 253 253
39595-253 253 253 221 221 221 86 86 86 156 107 11
39596-216 158 10 236 178 12 242 186 14 246 186 14
39597-242 186 14 239 182 13 239 182 13 242 186 14
39598-242 186 14 246 186 14 246 190 14 246 190 14
39599-246 190 14 246 190 14 246 190 14 246 190 14
39600-242 186 14 225 175 15 142 122 72 66 66 66
39601- 30 30 30 10 10 10 0 0 0 0 0 0
39602- 0 0 0 0 0 0 0 0 0 0 0 0
39603- 0 0 0 0 0 0 0 0 0 0 0 0
39604- 0 0 0 0 0 0 0 0 0 6 6 6
39605- 26 26 26 70 70 70 163 133 67 210 150 10
39606-236 178 12 246 190 14 246 190 14 246 190 14
39607-246 190 14 246 190 14 246 190 14 246 190 14
39608-246 190 14 246 190 14 246 190 14 246 190 14
39609-246 190 14 246 190 14 246 190 14 246 190 14
39610-232 195 16 121 92 8 34 34 34 106 106 106
39611-221 221 221 253 253 253 253 253 253 253 253 253
39612-253 253 253 253 253 253 253 253 253 253 253 253
39613-253 253 253 253 253 253 253 253 253 253 253 253
39614-253 253 253 253 253 253 253 253 253 253 253 253
39615-242 242 242 82 82 82 18 14 6 163 110 8
39616-216 158 10 236 178 12 242 186 14 246 190 14
39617-246 190 14 246 190 14 246 190 14 246 190 14
39618-246 190 14 246 190 14 246 190 14 246 190 14
39619-246 190 14 246 190 14 246 190 14 246 190 14
39620-246 190 14 246 190 14 242 186 14 163 133 67
39621- 46 46 46 18 18 18 6 6 6 0 0 0
39622- 0 0 0 0 0 0 0 0 0 0 0 0
39623- 0 0 0 0 0 0 0 0 0 0 0 0
39624- 0 0 0 0 0 0 0 0 0 10 10 10
39625- 30 30 30 78 78 78 163 133 67 210 150 10
39626-236 178 12 246 186 14 246 190 14 246 190 14
39627-246 190 14 246 190 14 246 190 14 246 190 14
39628-246 190 14 246 190 14 246 190 14 246 190 14
39629-246 190 14 246 190 14 246 190 14 246 190 14
39630-241 196 14 215 174 15 190 178 144 253 253 253
39631-253 253 253 253 253 253 253 253 253 253 253 253
39632-253 253 253 253 253 253 253 253 253 253 253 253
39633-253 253 253 253 253 253 253 253 253 253 253 253
39634-253 253 253 253 253 253 253 253 253 218 218 218
39635- 58 58 58 2 2 6 22 18 6 167 114 7
39636-216 158 10 236 178 12 246 186 14 246 190 14
39637-246 190 14 246 190 14 246 190 14 246 190 14
39638-246 190 14 246 190 14 246 190 14 246 190 14
39639-246 190 14 246 190 14 246 190 14 246 190 14
39640-246 190 14 246 186 14 242 186 14 190 150 46
39641- 54 54 54 22 22 22 6 6 6 0 0 0
39642- 0 0 0 0 0 0 0 0 0 0 0 0
39643- 0 0 0 0 0 0 0 0 0 0 0 0
39644- 0 0 0 0 0 0 0 0 0 14 14 14
39645- 38 38 38 86 86 86 180 133 36 213 154 11
39646-236 178 12 246 186 14 246 190 14 246 190 14
39647-246 190 14 246 190 14 246 190 14 246 190 14
39648-246 190 14 246 190 14 246 190 14 246 190 14
39649-246 190 14 246 190 14 246 190 14 246 190 14
39650-246 190 14 232 195 16 190 146 13 214 214 214
39651-253 253 253 253 253 253 253 253 253 253 253 253
39652-253 253 253 253 253 253 253 253 253 253 253 253
39653-253 253 253 253 253 253 253 253 253 253 253 253
39654-253 253 253 250 250 250 170 170 170 26 26 26
39655- 2 2 6 2 2 6 37 26 9 163 110 8
39656-219 162 10 239 182 13 246 186 14 246 190 14
39657-246 190 14 246 190 14 246 190 14 246 190 14
39658-246 190 14 246 190 14 246 190 14 246 190 14
39659-246 190 14 246 190 14 246 190 14 246 190 14
39660-246 186 14 236 178 12 224 166 10 142 122 72
39661- 46 46 46 18 18 18 6 6 6 0 0 0
39662- 0 0 0 0 0 0 0 0 0 0 0 0
39663- 0 0 0 0 0 0 0 0 0 0 0 0
39664- 0 0 0 0 0 0 6 6 6 18 18 18
39665- 50 50 50 109 106 95 192 133 9 224 166 10
39666-242 186 14 246 190 14 246 190 14 246 190 14
39667-246 190 14 246 190 14 246 190 14 246 190 14
39668-246 190 14 246 190 14 246 190 14 246 190 14
39669-246 190 14 246 190 14 246 190 14 246 190 14
39670-242 186 14 226 184 13 210 162 10 142 110 46
39671-226 226 226 253 253 253 253 253 253 253 253 253
39672-253 253 253 253 253 253 253 253 253 253 253 253
39673-253 253 253 253 253 253 253 253 253 253 253 253
39674-198 198 198 66 66 66 2 2 6 2 2 6
39675- 2 2 6 2 2 6 50 34 6 156 107 11
39676-219 162 10 239 182 13 246 186 14 246 190 14
39677-246 190 14 246 190 14 246 190 14 246 190 14
39678-246 190 14 246 190 14 246 190 14 246 190 14
39679-246 190 14 246 190 14 246 190 14 242 186 14
39680-234 174 13 213 154 11 154 122 46 66 66 66
39681- 30 30 30 10 10 10 0 0 0 0 0 0
39682- 0 0 0 0 0 0 0 0 0 0 0 0
39683- 0 0 0 0 0 0 0 0 0 0 0 0
39684- 0 0 0 0 0 0 6 6 6 22 22 22
39685- 58 58 58 154 121 60 206 145 10 234 174 13
39686-242 186 14 246 186 14 246 190 14 246 190 14
39687-246 190 14 246 190 14 246 190 14 246 190 14
39688-246 190 14 246 190 14 246 190 14 246 190 14
39689-246 190 14 246 190 14 246 190 14 246 190 14
39690-246 186 14 236 178 12 210 162 10 163 110 8
39691- 61 42 6 138 138 138 218 218 218 250 250 250
39692-253 253 253 253 253 253 253 253 253 250 250 250
39693-242 242 242 210 210 210 144 144 144 66 66 66
39694- 6 6 6 2 2 6 2 2 6 2 2 6
39695- 2 2 6 2 2 6 61 42 6 163 110 8
39696-216 158 10 236 178 12 246 190 14 246 190 14
39697-246 190 14 246 190 14 246 190 14 246 190 14
39698-246 190 14 246 190 14 246 190 14 246 190 14
39699-246 190 14 239 182 13 230 174 11 216 158 10
39700-190 142 34 124 112 88 70 70 70 38 38 38
39701- 18 18 18 6 6 6 0 0 0 0 0 0
39702- 0 0 0 0 0 0 0 0 0 0 0 0
39703- 0 0 0 0 0 0 0 0 0 0 0 0
39704- 0 0 0 0 0 0 6 6 6 22 22 22
39705- 62 62 62 168 124 44 206 145 10 224 166 10
39706-236 178 12 239 182 13 242 186 14 242 186 14
39707-246 186 14 246 190 14 246 190 14 246 190 14
39708-246 190 14 246 190 14 246 190 14 246 190 14
39709-246 190 14 246 190 14 246 190 14 246 190 14
39710-246 190 14 236 178 12 216 158 10 175 118 6
39711- 80 54 7 2 2 6 6 6 6 30 30 30
39712- 54 54 54 62 62 62 50 50 50 38 38 38
39713- 14 14 14 2 2 6 2 2 6 2 2 6
39714- 2 2 6 2 2 6 2 2 6 2 2 6
39715- 2 2 6 6 6 6 80 54 7 167 114 7
39716-213 154 11 236 178 12 246 190 14 246 190 14
39717-246 190 14 246 190 14 246 190 14 246 190 14
39718-246 190 14 242 186 14 239 182 13 239 182 13
39719-230 174 11 210 150 10 174 135 50 124 112 88
39720- 82 82 82 54 54 54 34 34 34 18 18 18
39721- 6 6 6 0 0 0 0 0 0 0 0 0
39722- 0 0 0 0 0 0 0 0 0 0 0 0
39723- 0 0 0 0 0 0 0 0 0 0 0 0
39724- 0 0 0 0 0 0 6 6 6 18 18 18
39725- 50 50 50 158 118 36 192 133 9 200 144 11
39726-216 158 10 219 162 10 224 166 10 226 170 11
39727-230 174 11 236 178 12 239 182 13 239 182 13
39728-242 186 14 246 186 14 246 190 14 246 190 14
39729-246 190 14 246 190 14 246 190 14 246 190 14
39730-246 186 14 230 174 11 210 150 10 163 110 8
39731-104 69 6 10 10 10 2 2 6 2 2 6
39732- 2 2 6 2 2 6 2 2 6 2 2 6
39733- 2 2 6 2 2 6 2 2 6 2 2 6
39734- 2 2 6 2 2 6 2 2 6 2 2 6
39735- 2 2 6 6 6 6 91 60 6 167 114 7
39736-206 145 10 230 174 11 242 186 14 246 190 14
39737-246 190 14 246 190 14 246 186 14 242 186 14
39738-239 182 13 230 174 11 224 166 10 213 154 11
39739-180 133 36 124 112 88 86 86 86 58 58 58
39740- 38 38 38 22 22 22 10 10 10 6 6 6
39741- 0 0 0 0 0 0 0 0 0 0 0 0
39742- 0 0 0 0 0 0 0 0 0 0 0 0
39743- 0 0 0 0 0 0 0 0 0 0 0 0
39744- 0 0 0 0 0 0 0 0 0 14 14 14
39745- 34 34 34 70 70 70 138 110 50 158 118 36
39746-167 114 7 180 123 7 192 133 9 197 138 11
39747-200 144 11 206 145 10 213 154 11 219 162 10
39748-224 166 10 230 174 11 239 182 13 242 186 14
39749-246 186 14 246 186 14 246 186 14 246 186 14
39750-239 182 13 216 158 10 185 133 11 152 99 6
39751-104 69 6 18 14 6 2 2 6 2 2 6
39752- 2 2 6 2 2 6 2 2 6 2 2 6
39753- 2 2 6 2 2 6 2 2 6 2 2 6
39754- 2 2 6 2 2 6 2 2 6 2 2 6
39755- 2 2 6 6 6 6 80 54 7 152 99 6
39756-192 133 9 219 162 10 236 178 12 239 182 13
39757-246 186 14 242 186 14 239 182 13 236 178 12
39758-224 166 10 206 145 10 192 133 9 154 121 60
39759- 94 94 94 62 62 62 42 42 42 22 22 22
39760- 14 14 14 6 6 6 0 0 0 0 0 0
39761- 0 0 0 0 0 0 0 0 0 0 0 0
39762- 0 0 0 0 0 0 0 0 0 0 0 0
39763- 0 0 0 0 0 0 0 0 0 0 0 0
39764- 0 0 0 0 0 0 0 0 0 6 6 6
39765- 18 18 18 34 34 34 58 58 58 78 78 78
39766-101 98 89 124 112 88 142 110 46 156 107 11
39767-163 110 8 167 114 7 175 118 6 180 123 7
39768-185 133 11 197 138 11 210 150 10 219 162 10
39769-226 170 11 236 178 12 236 178 12 234 174 13
39770-219 162 10 197 138 11 163 110 8 130 83 6
39771- 91 60 6 10 10 10 2 2 6 2 2 6
39772- 18 18 18 38 38 38 38 38 38 38 38 38
39773- 38 38 38 38 38 38 38 38 38 38 38 38
39774- 38 38 38 38 38 38 26 26 26 2 2 6
39775- 2 2 6 6 6 6 70 47 6 137 92 6
39776-175 118 6 200 144 11 219 162 10 230 174 11
39777-234 174 13 230 174 11 219 162 10 210 150 10
39778-192 133 9 163 110 8 124 112 88 82 82 82
39779- 50 50 50 30 30 30 14 14 14 6 6 6
39780- 0 0 0 0 0 0 0 0 0 0 0 0
39781- 0 0 0 0 0 0 0 0 0 0 0 0
39782- 0 0 0 0 0 0 0 0 0 0 0 0
39783- 0 0 0 0 0 0 0 0 0 0 0 0
39784- 0 0 0 0 0 0 0 0 0 0 0 0
39785- 6 6 6 14 14 14 22 22 22 34 34 34
39786- 42 42 42 58 58 58 74 74 74 86 86 86
39787-101 98 89 122 102 70 130 98 46 121 87 25
39788-137 92 6 152 99 6 163 110 8 180 123 7
39789-185 133 11 197 138 11 206 145 10 200 144 11
39790-180 123 7 156 107 11 130 83 6 104 69 6
39791- 50 34 6 54 54 54 110 110 110 101 98 89
39792- 86 86 86 82 82 82 78 78 78 78 78 78
39793- 78 78 78 78 78 78 78 78 78 78 78 78
39794- 78 78 78 82 82 82 86 86 86 94 94 94
39795-106 106 106 101 101 101 86 66 34 124 80 6
39796-156 107 11 180 123 7 192 133 9 200 144 11
39797-206 145 10 200 144 11 192 133 9 175 118 6
39798-139 102 15 109 106 95 70 70 70 42 42 42
39799- 22 22 22 10 10 10 0 0 0 0 0 0
39800- 0 0 0 0 0 0 0 0 0 0 0 0
39801- 0 0 0 0 0 0 0 0 0 0 0 0
39802- 0 0 0 0 0 0 0 0 0 0 0 0
39803- 0 0 0 0 0 0 0 0 0 0 0 0
39804- 0 0 0 0 0 0 0 0 0 0 0 0
39805- 0 0 0 0 0 0 6 6 6 10 10 10
39806- 14 14 14 22 22 22 30 30 30 38 38 38
39807- 50 50 50 62 62 62 74 74 74 90 90 90
39808-101 98 89 112 100 78 121 87 25 124 80 6
39809-137 92 6 152 99 6 152 99 6 152 99 6
39810-138 86 6 124 80 6 98 70 6 86 66 30
39811-101 98 89 82 82 82 58 58 58 46 46 46
39812- 38 38 38 34 34 34 34 34 34 34 34 34
39813- 34 34 34 34 34 34 34 34 34 34 34 34
39814- 34 34 34 34 34 34 38 38 38 42 42 42
39815- 54 54 54 82 82 82 94 86 76 91 60 6
39816-134 86 6 156 107 11 167 114 7 175 118 6
39817-175 118 6 167 114 7 152 99 6 121 87 25
39818-101 98 89 62 62 62 34 34 34 18 18 18
39819- 6 6 6 0 0 0 0 0 0 0 0 0
39820- 0 0 0 0 0 0 0 0 0 0 0 0
39821- 0 0 0 0 0 0 0 0 0 0 0 0
39822- 0 0 0 0 0 0 0 0 0 0 0 0
39823- 0 0 0 0 0 0 0 0 0 0 0 0
39824- 0 0 0 0 0 0 0 0 0 0 0 0
39825- 0 0 0 0 0 0 0 0 0 0 0 0
39826- 0 0 0 6 6 6 6 6 6 10 10 10
39827- 18 18 18 22 22 22 30 30 30 42 42 42
39828- 50 50 50 66 66 66 86 86 86 101 98 89
39829-106 86 58 98 70 6 104 69 6 104 69 6
39830-104 69 6 91 60 6 82 62 34 90 90 90
39831- 62 62 62 38 38 38 22 22 22 14 14 14
39832- 10 10 10 10 10 10 10 10 10 10 10 10
39833- 10 10 10 10 10 10 6 6 6 10 10 10
39834- 10 10 10 10 10 10 10 10 10 14 14 14
39835- 22 22 22 42 42 42 70 70 70 89 81 66
39836- 80 54 7 104 69 6 124 80 6 137 92 6
39837-134 86 6 116 81 8 100 82 52 86 86 86
39838- 58 58 58 30 30 30 14 14 14 6 6 6
39839- 0 0 0 0 0 0 0 0 0 0 0 0
39840- 0 0 0 0 0 0 0 0 0 0 0 0
39841- 0 0 0 0 0 0 0 0 0 0 0 0
39842- 0 0 0 0 0 0 0 0 0 0 0 0
39843- 0 0 0 0 0 0 0 0 0 0 0 0
39844- 0 0 0 0 0 0 0 0 0 0 0 0
39845- 0 0 0 0 0 0 0 0 0 0 0 0
39846- 0 0 0 0 0 0 0 0 0 0 0 0
39847- 0 0 0 6 6 6 10 10 10 14 14 14
39848- 18 18 18 26 26 26 38 38 38 54 54 54
39849- 70 70 70 86 86 86 94 86 76 89 81 66
39850- 89 81 66 86 86 86 74 74 74 50 50 50
39851- 30 30 30 14 14 14 6 6 6 0 0 0
39852- 0 0 0 0 0 0 0 0 0 0 0 0
39853- 0 0 0 0 0 0 0 0 0 0 0 0
39854- 0 0 0 0 0 0 0 0 0 0 0 0
39855- 6 6 6 18 18 18 34 34 34 58 58 58
39856- 82 82 82 89 81 66 89 81 66 89 81 66
39857- 94 86 66 94 86 76 74 74 74 50 50 50
39858- 26 26 26 14 14 14 6 6 6 0 0 0
39859- 0 0 0 0 0 0 0 0 0 0 0 0
39860- 0 0 0 0 0 0 0 0 0 0 0 0
39861- 0 0 0 0 0 0 0 0 0 0 0 0
39862- 0 0 0 0 0 0 0 0 0 0 0 0
39863- 0 0 0 0 0 0 0 0 0 0 0 0
39864- 0 0 0 0 0 0 0 0 0 0 0 0
39865- 0 0 0 0 0 0 0 0 0 0 0 0
39866- 0 0 0 0 0 0 0 0 0 0 0 0
39867- 0 0 0 0 0 0 0 0 0 0 0 0
39868- 6 6 6 6 6 6 14 14 14 18 18 18
39869- 30 30 30 38 38 38 46 46 46 54 54 54
39870- 50 50 50 42 42 42 30 30 30 18 18 18
39871- 10 10 10 0 0 0 0 0 0 0 0 0
39872- 0 0 0 0 0 0 0 0 0 0 0 0
39873- 0 0 0 0 0 0 0 0 0 0 0 0
39874- 0 0 0 0 0 0 0 0 0 0 0 0
39875- 0 0 0 6 6 6 14 14 14 26 26 26
39876- 38 38 38 50 50 50 58 58 58 58 58 58
39877- 54 54 54 42 42 42 30 30 30 18 18 18
39878- 10 10 10 0 0 0 0 0 0 0 0 0
39879- 0 0 0 0 0 0 0 0 0 0 0 0
39880- 0 0 0 0 0 0 0 0 0 0 0 0
39881- 0 0 0 0 0 0 0 0 0 0 0 0
39882- 0 0 0 0 0 0 0 0 0 0 0 0
39883- 0 0 0 0 0 0 0 0 0 0 0 0
39884- 0 0 0 0 0 0 0 0 0 0 0 0
39885- 0 0 0 0 0 0 0 0 0 0 0 0
39886- 0 0 0 0 0 0 0 0 0 0 0 0
39887- 0 0 0 0 0 0 0 0 0 0 0 0
39888- 0 0 0 0 0 0 0 0 0 6 6 6
39889- 6 6 6 10 10 10 14 14 14 18 18 18
39890- 18 18 18 14 14 14 10 10 10 6 6 6
39891- 0 0 0 0 0 0 0 0 0 0 0 0
39892- 0 0 0 0 0 0 0 0 0 0 0 0
39893- 0 0 0 0 0 0 0 0 0 0 0 0
39894- 0 0 0 0 0 0 0 0 0 0 0 0
39895- 0 0 0 0 0 0 0 0 0 6 6 6
39896- 14 14 14 18 18 18 22 22 22 22 22 22
39897- 18 18 18 14 14 14 10 10 10 6 6 6
39898- 0 0 0 0 0 0 0 0 0 0 0 0
39899- 0 0 0 0 0 0 0 0 0 0 0 0
39900- 0 0 0 0 0 0 0 0 0 0 0 0
39901- 0 0 0 0 0 0 0 0 0 0 0 0
39902- 0 0 0 0 0 0 0 0 0 0 0 0
39903+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39904+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39905+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39908+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39909+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39911+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39912+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39913+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39914+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39916+4 4 4 4 4 4
39917+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39918+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39919+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39920+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39921+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39922+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39923+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39924+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39925+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39926+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39927+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39928+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39930+4 4 4 4 4 4
39931+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39932+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39933+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39934+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39936+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39937+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39938+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39939+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39940+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39941+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39942+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39944+4 4 4 4 4 4
39945+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39946+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39947+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39948+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39949+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39950+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39951+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39952+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39953+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39954+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39955+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39958+4 4 4 4 4 4
39959+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39960+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39961+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39963+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39964+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39967+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39969+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39972+4 4 4 4 4 4
39973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39974+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39975+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39976+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39977+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39986+4 4 4 4 4 4
39987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39988+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39989+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39990+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39991+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
39992+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
39993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39995+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39996+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
39997+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39998+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
39999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40000+4 4 4 4 4 4
40001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40002+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40003+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40005+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
40006+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
40007+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40010+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
40011+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
40012+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
40013+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40014+4 4 4 4 4 4
40015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40019+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
40020+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
40021+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40024+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
40025+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
40026+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
40027+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
40028+4 4 4 4 4 4
40029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40032+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
40033+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
40034+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
40035+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
40036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40037+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40038+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
40039+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
40040+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
40041+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
40042+4 4 4 4 4 4
40043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40046+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
40047+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
40048+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
40049+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
40050+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40051+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
40052+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
40053+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
40054+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
40055+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
40056+4 4 4 4 4 4
40057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40060+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
40061+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
40062+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
40063+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
40064+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40065+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
40066+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
40067+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
40068+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
40069+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
40070+4 4 4 4 4 4
40071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40073+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
40074+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
40075+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
40076+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
40077+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
40078+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
40079+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
40080+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
40081+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
40082+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
40083+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
40084+4 4 4 4 4 4
40085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40087+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
40088+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
40089+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
40090+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
40091+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
40092+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
40093+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
40094+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
40095+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
40096+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
40097+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
40098+4 4 4 4 4 4
40099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40101+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
40102+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
40103+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
40104+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
40105+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
40106+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
40107+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
40108+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
40109+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
40110+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
40111+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40112+4 4 4 4 4 4
40113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40115+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
40116+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
40117+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
40118+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
40119+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
40120+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
40121+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
40122+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
40123+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
40124+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
40125+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
40126+4 4 4 4 4 4
40127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40128+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
40129+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
40130+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
40131+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
40132+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
40133+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
40134+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
40135+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
40136+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
40137+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
40138+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
40139+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
40140+4 4 4 4 4 4
40141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40142+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
40143+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
40144+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
40145+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40146+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
40147+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
40148+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
40149+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
40150+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
40151+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
40152+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
40153+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
40154+0 0 0 4 4 4
40155+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40156+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
40157+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
40158+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
40159+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
40160+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
40161+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
40162+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
40163+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
40164+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
40165+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
40166+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
40167+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
40168+2 0 0 0 0 0
40169+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
40170+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
40171+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
40172+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
40173+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
40174+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
40175+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
40176+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
40177+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
40178+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
40179+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
40180+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
40181+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
40182+37 38 37 0 0 0
40183+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40184+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
40185+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
40186+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
40187+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
40188+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
40189+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
40190+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
40191+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
40192+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
40193+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
40194+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
40195+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
40196+85 115 134 4 0 0
40197+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
40198+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
40199+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
40200+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
40201+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
40202+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
40203+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
40204+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
40205+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
40206+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
40207+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
40208+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
40209+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
40210+60 73 81 4 0 0
40211+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
40212+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
40213+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
40214+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
40215+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
40216+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
40217+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
40218+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
40219+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
40220+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
40221+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
40222+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
40223+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
40224+16 19 21 4 0 0
40225+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
40226+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
40227+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
40228+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
40229+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
40230+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
40231+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
40232+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
40233+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
40234+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
40235+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
40236+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
40237+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
40238+4 0 0 4 3 3
40239+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
40240+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
40241+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
40242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
40243+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
40244+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
40245+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
40246+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
40247+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
40248+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
40249+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
40250+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
40251+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
40252+3 2 2 4 4 4
40253+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
40254+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
40255+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
40256+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40257+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
40258+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
40259+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
40260+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
40261+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
40262+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
40263+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
40264+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
40265+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
40266+4 4 4 4 4 4
40267+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
40268+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
40269+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
40270+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
40271+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
40272+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
40273+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
40274+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
40275+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
40276+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
40277+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
40278+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
40279+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
40280+4 4 4 4 4 4
40281+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
40282+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
40283+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
40284+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
40285+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
40286+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40287+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
40288+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
40289+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
40290+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
40291+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
40292+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
40293+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
40294+5 5 5 5 5 5
40295+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
40296+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
40297+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
40298+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
40299+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
40300+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40301+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
40302+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
40303+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
40304+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
40305+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
40306+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
40307+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40308+5 5 5 4 4 4
40309+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
40310+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
40311+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
40312+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
40313+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40314+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
40315+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
40316+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
40317+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
40318+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
40319+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
40320+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40322+4 4 4 4 4 4
40323+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
40324+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
40325+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
40326+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
40327+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
40328+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40329+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40330+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
40331+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
40332+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
40333+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
40334+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
40335+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40336+4 4 4 4 4 4
40337+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
40338+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
40339+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
40340+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
40341+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40342+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
40343+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
40344+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
40345+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
40346+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
40347+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
40348+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40350+4 4 4 4 4 4
40351+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
40352+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
40353+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
40354+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
40355+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40356+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40357+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40358+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
40359+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
40360+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
40361+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
40362+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40363+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40364+4 4 4 4 4 4
40365+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
40366+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
40367+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
40368+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
40369+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40370+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
40371+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40372+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
40373+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
40374+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
40375+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40376+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40377+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40378+4 4 4 4 4 4
40379+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
40380+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
40381+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
40382+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
40383+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40384+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
40385+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
40386+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
40387+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
40388+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
40389+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
40390+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40391+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40392+4 4 4 4 4 4
40393+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
40394+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
40395+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
40396+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
40397+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40398+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
40399+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
40400+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
40401+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
40402+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
40403+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
40404+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40406+4 4 4 4 4 4
40407+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
40408+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
40409+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
40410+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40411+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
40412+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
40413+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
40414+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
40415+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
40416+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
40417+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40418+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40420+4 4 4 4 4 4
40421+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
40422+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
40423+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
40424+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40425+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40426+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
40427+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
40428+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
40429+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
40430+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
40431+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40434+4 4 4 4 4 4
40435+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
40436+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
40437+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40438+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40439+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40440+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
40441+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
40442+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
40443+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
40444+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
40445+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40448+4 4 4 4 4 4
40449+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
40450+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
40451+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40452+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40453+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40454+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
40455+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
40456+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
40457+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40458+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40459+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40462+4 4 4 4 4 4
40463+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40464+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
40465+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40466+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
40467+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
40468+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
40469+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
40470+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
40471+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40472+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40473+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40475+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40476+4 4 4 4 4 4
40477+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40478+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
40479+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40480+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
40481+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40482+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
40483+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
40484+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
40485+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40486+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40487+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40489+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40490+4 4 4 4 4 4
40491+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
40492+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
40493+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40494+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
40495+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
40496+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
40497+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
40498+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
40499+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40500+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40501+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40504+4 4 4 4 4 4
40505+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
40506+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
40507+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40508+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
40509+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
40510+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
40511+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
40512+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
40513+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40514+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40515+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40516+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40517+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40518+4 4 4 4 4 4
40519+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40520+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
40521+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40522+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
40523+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
40524+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
40525+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
40526+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
40527+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40528+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40529+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40530+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40531+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40532+4 4 4 4 4 4
40533+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
40534+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
40535+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40536+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
40537+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
40538+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
40539+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
40540+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
40541+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
40542+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40543+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40544+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40545+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40546+4 4 4 4 4 4
40547+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40548+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
40549+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
40550+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
40551+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
40552+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
40553+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
40554+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
40555+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40556+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40557+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40558+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40559+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40560+4 4 4 4 4 4
40561+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40562+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
40563+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40564+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
40565+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
40566+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
40567+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
40568+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
40569+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40570+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40571+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40574+4 4 4 4 4 4
40575+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40576+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
40577+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
40578+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
40579+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
40580+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
40581+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40582+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
40583+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40584+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40585+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40587+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40588+4 4 4 4 4 4
40589+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40590+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
40591+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
40592+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40593+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
40594+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
40595+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40596+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
40597+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40598+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40599+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40601+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40602+4 4 4 4 4 4
40603+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40604+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
40605+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
40606+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
40607+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
40608+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
40609+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
40610+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
40611+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
40612+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40613+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40615+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40616+4 4 4 4 4 4
40617+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40618+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
40619+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
40620+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
40621+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
40622+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
40623+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
40624+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
40625+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
40626+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40627+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40628+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40629+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40630+4 4 4 4 4 4
40631+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
40632+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
40633+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
40634+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
40635+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40636+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
40637+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
40638+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
40639+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
40640+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40641+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40643+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40644+4 4 4 4 4 4
40645+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40646+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
40647+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
40648+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
40649+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
40650+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
40651+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
40652+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
40653+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
40654+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40655+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40657+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40658+4 4 4 4 4 4
40659+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
40660+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
40661+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
40662+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
40663+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
40664+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
40665+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
40666+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
40667+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
40668+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
40669+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40671+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40672+4 4 4 4 4 4
40673+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
40674+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40675+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
40676+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
40677+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
40678+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
40679+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
40680+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
40681+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
40682+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
40683+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40685+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40686+4 4 4 4 4 4
40687+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
40688+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40689+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
40690+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
40691+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
40692+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
40693+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40694+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
40695+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
40696+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
40697+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40698+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40699+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40700+4 4 4 4 4 4
40701+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
40702+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
40703+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
40704+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
40705+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
40706+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
40707+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
40708+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
40709+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
40710+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
40711+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40713+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40714+4 4 4 4 4 4
40715+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
40716+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
40717+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
40718+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
40719+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
40720+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
40721+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
40722+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
40723+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
40724+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
40725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40727+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40728+4 4 4 4 4 4
40729+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40730+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
40731+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
40732+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
40733+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
40734+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
40735+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
40736+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
40737+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
40738+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
40739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40741+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40742+4 4 4 4 4 4
40743+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
40744+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
40745+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
40746+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
40747+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
40748+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
40749+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
40750+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
40751+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
40752+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40755+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40756+4 4 4 4 4 4
40757+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
40758+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
40759+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
40760+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
40761+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
40762+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
40763+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
40764+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
40765+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40766+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40769+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40770+4 4 4 4 4 4
40771+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
40772+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40773+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
40774+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
40775+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
40776+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
40777+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
40778+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
40779+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
40780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40783+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40784+4 4 4 4 4 4
40785+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
40786+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
40787+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
40788+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
40789+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
40790+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
40791+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
40792+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
40793+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
40794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40798+4 4 4 4 4 4
40799+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
40800+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
40801+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
40802+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
40803+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
40804+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
40805+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
40806+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
40807+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40811+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40812+4 4 4 4 4 4
40813+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
40814+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
40815+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
40816+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
40817+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
40818+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
40819+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
40820+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
40821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40826+4 4 4 4 4 4
40827+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
40828+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
40829+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
40830+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
40831+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
40832+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
40833+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
40834+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
40835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40839+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40840+4 4 4 4 4 4
40841+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40842+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
40843+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40844+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
40845+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
40846+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
40847+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
40848+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
40849+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40852+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40853+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40854+4 4 4 4 4 4
40855+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40856+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
40857+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
40858+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
40859+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
40860+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
40861+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
40862+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
40863+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40866+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40867+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40868+4 4 4 4 4 4
40869+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40870+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40871+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
40872+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
40873+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
40874+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
40875+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
40876+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40877+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40879+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40880+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40881+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40882+4 4 4 4 4 4
40883+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40884+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40885+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
40886+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
40887+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
40888+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
40889+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
40890+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40891+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40893+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40894+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40895+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40896+4 4 4 4 4 4
40897+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40898+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40899+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40900+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40901+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
40902+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
40903+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
40904+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40905+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40908+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40909+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40910+4 4 4 4 4 4
40911+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40912+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40913+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40914+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
40915+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
40916+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
40917+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
40918+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40919+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40920+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40921+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40922+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40923+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40924+4 4 4 4 4 4
40925+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40926+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40927+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40928+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
40929+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40930+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
40931+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
40932+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40933+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40934+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40936+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40937+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40938+4 4 4 4 4 4
40939+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40940+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40941+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40942+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
40943+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
40944+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
40945+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
40946+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40947+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40948+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40949+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40950+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40951+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40952+4 4 4 4 4 4
40953+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40954+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40955+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40957+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
40958+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40959+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40960+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40961+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40963+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40964+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40966+4 4 4 4 4 4
40967+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40969+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40971+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
40972+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
40973+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40974+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40975+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40976+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40977+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40980+4 4 4 4 4 4
40981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40985+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
40986+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
40987+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40988+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40989+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40990+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40991+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40992+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40994+4 4 4 4 4 4
40995+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40997+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40999+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
41000+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
41001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41002+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41003+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41008+4 4 4 4 4 4
41009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41013+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41014+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
41015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41022+4 4 4 4 4 4
41023diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
41024index a159b63..4ab532d 100644
41025--- a/drivers/video/udlfb.c
41026+++ b/drivers/video/udlfb.c
41027@@ -620,11 +620,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
41028 dlfb_urb_completion(urb);
41029
41030 error:
41031- atomic_add(bytes_sent, &dev->bytes_sent);
41032- atomic_add(bytes_identical, &dev->bytes_identical);
41033- atomic_add(width*height*2, &dev->bytes_rendered);
41034+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41035+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41036+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
41037 end_cycles = get_cycles();
41038- atomic_add(((unsigned int) ((end_cycles - start_cycles)
41039+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41040 >> 10)), /* Kcycles */
41041 &dev->cpu_kcycles_used);
41042
41043@@ -745,11 +745,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
41044 dlfb_urb_completion(urb);
41045
41046 error:
41047- atomic_add(bytes_sent, &dev->bytes_sent);
41048- atomic_add(bytes_identical, &dev->bytes_identical);
41049- atomic_add(bytes_rendered, &dev->bytes_rendered);
41050+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41051+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41052+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
41053 end_cycles = get_cycles();
41054- atomic_add(((unsigned int) ((end_cycles - start_cycles)
41055+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41056 >> 10)), /* Kcycles */
41057 &dev->cpu_kcycles_used);
41058 }
41059@@ -1373,7 +1373,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
41060 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41061 struct dlfb_data *dev = fb_info->par;
41062 return snprintf(buf, PAGE_SIZE, "%u\n",
41063- atomic_read(&dev->bytes_rendered));
41064+ atomic_read_unchecked(&dev->bytes_rendered));
41065 }
41066
41067 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41068@@ -1381,7 +1381,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41069 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41070 struct dlfb_data *dev = fb_info->par;
41071 return snprintf(buf, PAGE_SIZE, "%u\n",
41072- atomic_read(&dev->bytes_identical));
41073+ atomic_read_unchecked(&dev->bytes_identical));
41074 }
41075
41076 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41077@@ -1389,7 +1389,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41078 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41079 struct dlfb_data *dev = fb_info->par;
41080 return snprintf(buf, PAGE_SIZE, "%u\n",
41081- atomic_read(&dev->bytes_sent));
41082+ atomic_read_unchecked(&dev->bytes_sent));
41083 }
41084
41085 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41086@@ -1397,7 +1397,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41087 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41088 struct dlfb_data *dev = fb_info->par;
41089 return snprintf(buf, PAGE_SIZE, "%u\n",
41090- atomic_read(&dev->cpu_kcycles_used));
41091+ atomic_read_unchecked(&dev->cpu_kcycles_used));
41092 }
41093
41094 static ssize_t edid_show(
41095@@ -1457,10 +1457,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
41096 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41097 struct dlfb_data *dev = fb_info->par;
41098
41099- atomic_set(&dev->bytes_rendered, 0);
41100- atomic_set(&dev->bytes_identical, 0);
41101- atomic_set(&dev->bytes_sent, 0);
41102- atomic_set(&dev->cpu_kcycles_used, 0);
41103+ atomic_set_unchecked(&dev->bytes_rendered, 0);
41104+ atomic_set_unchecked(&dev->bytes_identical, 0);
41105+ atomic_set_unchecked(&dev->bytes_sent, 0);
41106+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
41107
41108 return count;
41109 }
41110diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
41111index b0e2a42..e2df3ad 100644
41112--- a/drivers/video/uvesafb.c
41113+++ b/drivers/video/uvesafb.c
41114@@ -19,6 +19,7 @@
41115 #include <linux/io.h>
41116 #include <linux/mutex.h>
41117 #include <linux/slab.h>
41118+#include <linux/moduleloader.h>
41119 #include <video/edid.h>
41120 #include <video/uvesafb.h>
41121 #ifdef CONFIG_X86
41122@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
41123 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
41124 par->pmi_setpal = par->ypan = 0;
41125 } else {
41126+
41127+#ifdef CONFIG_PAX_KERNEXEC
41128+#ifdef CONFIG_MODULES
41129+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
41130+#endif
41131+ if (!par->pmi_code) {
41132+ par->pmi_setpal = par->ypan = 0;
41133+ return 0;
41134+ }
41135+#endif
41136+
41137 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
41138 + task->t.regs.edi);
41139+
41140+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41141+ pax_open_kernel();
41142+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
41143+ pax_close_kernel();
41144+
41145+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
41146+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
41147+#else
41148 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
41149 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
41150+#endif
41151+
41152 printk(KERN_INFO "uvesafb: protected mode interface info at "
41153 "%04x:%04x\n",
41154 (u16)task->t.regs.es, (u16)task->t.regs.edi);
41155@@ -816,13 +839,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
41156 par->ypan = ypan;
41157
41158 if (par->pmi_setpal || par->ypan) {
41159+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
41160 if (__supported_pte_mask & _PAGE_NX) {
41161 par->pmi_setpal = par->ypan = 0;
41162 printk(KERN_WARNING "uvesafb: NX protection is actively."
41163 "We have better not to use the PMI.\n");
41164- } else {
41165+ } else
41166+#endif
41167 uvesafb_vbe_getpmi(task, par);
41168- }
41169 }
41170 #else
41171 /* The protected mode interface is not available on non-x86. */
41172@@ -1836,6 +1860,11 @@ out:
41173 if (par->vbe_modes)
41174 kfree(par->vbe_modes);
41175
41176+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41177+ if (par->pmi_code)
41178+ module_free_exec(NULL, par->pmi_code);
41179+#endif
41180+
41181 framebuffer_release(info);
41182 return err;
41183 }
41184@@ -1862,6 +1891,12 @@ static int uvesafb_remove(struct platform_device *dev)
41185 kfree(par->vbe_state_orig);
41186 if (par->vbe_state_saved)
41187 kfree(par->vbe_state_saved);
41188+
41189+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41190+ if (par->pmi_code)
41191+ module_free_exec(NULL, par->pmi_code);
41192+#endif
41193+
41194 }
41195
41196 framebuffer_release(info);
41197diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
41198index 501b340..86bd4cf 100644
41199--- a/drivers/video/vesafb.c
41200+++ b/drivers/video/vesafb.c
41201@@ -9,6 +9,7 @@
41202 */
41203
41204 #include <linux/module.h>
41205+#include <linux/moduleloader.h>
41206 #include <linux/kernel.h>
41207 #include <linux/errno.h>
41208 #include <linux/string.h>
41209@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
41210 static int vram_total __initdata; /* Set total amount of memory */
41211 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
41212 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
41213-static void (*pmi_start)(void) __read_mostly;
41214-static void (*pmi_pal) (void) __read_mostly;
41215+static void (*pmi_start)(void) __read_only;
41216+static void (*pmi_pal) (void) __read_only;
41217 static int depth __read_mostly;
41218 static int vga_compat __read_mostly;
41219 /* --------------------------------------------------------------------- */
41220@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
41221 unsigned int size_vmode;
41222 unsigned int size_remap;
41223 unsigned int size_total;
41224+ void *pmi_code = NULL;
41225
41226 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
41227 return -ENODEV;
41228@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
41229 size_remap = size_total;
41230 vesafb_fix.smem_len = size_remap;
41231
41232-#ifndef __i386__
41233- screen_info.vesapm_seg = 0;
41234-#endif
41235-
41236 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
41237 printk(KERN_WARNING
41238 "vesafb: cannot reserve video memory at 0x%lx\n",
41239@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
41240 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
41241 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
41242
41243+#ifdef __i386__
41244+
41245+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41246+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
41247+ if (!pmi_code)
41248+#elif !defined(CONFIG_PAX_KERNEXEC)
41249+ if (0)
41250+#endif
41251+
41252+#endif
41253+ screen_info.vesapm_seg = 0;
41254+
41255 if (screen_info.vesapm_seg) {
41256- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
41257- screen_info.vesapm_seg,screen_info.vesapm_off);
41258+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
41259+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
41260 }
41261
41262 if (screen_info.vesapm_seg < 0xc000)
41263@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
41264
41265 if (ypan || pmi_setpal) {
41266 unsigned short *pmi_base;
41267+
41268 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
41269- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
41270- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
41271+
41272+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41273+ pax_open_kernel();
41274+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
41275+#else
41276+ pmi_code = pmi_base;
41277+#endif
41278+
41279+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
41280+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
41281+
41282+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41283+ pmi_start = ktva_ktla(pmi_start);
41284+ pmi_pal = ktva_ktla(pmi_pal);
41285+ pax_close_kernel();
41286+#endif
41287+
41288 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
41289 if (pmi_base[3]) {
41290 printk(KERN_INFO "vesafb: pmi: ports = ");
41291@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
41292 info->node, info->fix.id);
41293 return 0;
41294 err:
41295+
41296+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41297+ module_free_exec(NULL, pmi_code);
41298+#endif
41299+
41300 if (info->screen_base)
41301 iounmap(info->screen_base);
41302 framebuffer_release(info);
41303diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
41304index 88714ae..16c2e11 100644
41305--- a/drivers/video/via/via_clock.h
41306+++ b/drivers/video/via/via_clock.h
41307@@ -56,7 +56,7 @@ struct via_clock {
41308
41309 void (*set_engine_pll_state)(u8 state);
41310 void (*set_engine_pll)(struct via_pll_config config);
41311-};
41312+} __no_const;
41313
41314
41315 static inline u32 get_pll_internal_frequency(u32 ref_freq,
41316diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
41317index e56c934..fc22f4b 100644
41318--- a/drivers/xen/xen-pciback/conf_space.h
41319+++ b/drivers/xen/xen-pciback/conf_space.h
41320@@ -44,15 +44,15 @@ struct config_field {
41321 struct {
41322 conf_dword_write write;
41323 conf_dword_read read;
41324- } dw;
41325+ } __no_const dw;
41326 struct {
41327 conf_word_write write;
41328 conf_word_read read;
41329- } w;
41330+ } __no_const w;
41331 struct {
41332 conf_byte_write write;
41333 conf_byte_read read;
41334- } b;
41335+ } __no_const b;
41336 } u;
41337 struct list_head list;
41338 };
41339diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
41340index 014c8dd..6f3dfe6 100644
41341--- a/fs/9p/vfs_inode.c
41342+++ b/fs/9p/vfs_inode.c
41343@@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41344 void
41345 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41346 {
41347- char *s = nd_get_link(nd);
41348+ const char *s = nd_get_link(nd);
41349
41350 p9_debug(P9_DEBUG_VFS, " %s %s\n",
41351 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
41352diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
41353index e95d1b6..3454244 100644
41354--- a/fs/Kconfig.binfmt
41355+++ b/fs/Kconfig.binfmt
41356@@ -89,7 +89,7 @@ config HAVE_AOUT
41357
41358 config BINFMT_AOUT
41359 tristate "Kernel support for a.out and ECOFF binaries"
41360- depends on HAVE_AOUT
41361+ depends on HAVE_AOUT && BROKEN
41362 ---help---
41363 A.out (Assembler.OUTput) is a set of formats for libraries and
41364 executables used in the earliest versions of UNIX. Linux used
41365diff --git a/fs/aio.c b/fs/aio.c
41366index e7f2fad..15ad8a4 100644
41367--- a/fs/aio.c
41368+++ b/fs/aio.c
41369@@ -118,7 +118,7 @@ static int aio_setup_ring(struct kioctx *ctx)
41370 size += sizeof(struct io_event) * nr_events;
41371 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
41372
41373- if (nr_pages < 0)
41374+ if (nr_pages <= 0)
41375 return -EINVAL;
41376
41377 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
41378@@ -1440,18 +1440,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
41379 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41380 {
41381 ssize_t ret;
41382+ struct iovec iovstack;
41383
41384 #ifdef CONFIG_COMPAT
41385 if (compat)
41386 ret = compat_rw_copy_check_uvector(type,
41387 (struct compat_iovec __user *)kiocb->ki_buf,
41388- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41389+ kiocb->ki_nbytes, 1, &iovstack,
41390 &kiocb->ki_iovec, 1);
41391 else
41392 #endif
41393 ret = rw_copy_check_uvector(type,
41394 (struct iovec __user *)kiocb->ki_buf,
41395- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41396+ kiocb->ki_nbytes, 1, &iovstack,
41397 &kiocb->ki_iovec, 1);
41398 if (ret < 0)
41399 goto out;
41400@@ -1460,6 +1461,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41401 if (ret < 0)
41402 goto out;
41403
41404+ if (kiocb->ki_iovec == &iovstack) {
41405+ kiocb->ki_inline_vec = iovstack;
41406+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
41407+ }
41408 kiocb->ki_nr_segs = kiocb->ki_nbytes;
41409 kiocb->ki_cur_seg = 0;
41410 /* ki_nbytes/left now reflect bytes instead of segs */
41411diff --git a/fs/attr.c b/fs/attr.c
41412index d94d1b6..f9bccd6 100644
41413--- a/fs/attr.c
41414+++ b/fs/attr.c
41415@@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
41416 unsigned long limit;
41417
41418 limit = rlimit(RLIMIT_FSIZE);
41419+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
41420 if (limit != RLIM_INFINITY && offset > limit)
41421 goto out_sig;
41422 if (offset > inode->i_sb->s_maxbytes)
41423diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
41424index da8876d..9f3e6d8 100644
41425--- a/fs/autofs4/waitq.c
41426+++ b/fs/autofs4/waitq.c
41427@@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
41428 {
41429 unsigned long sigpipe, flags;
41430 mm_segment_t fs;
41431- const char *data = (const char *)addr;
41432+ const char __user *data = (const char __force_user *)addr;
41433 ssize_t wr = 0;
41434
41435 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
41436diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
41437index e18da23..affc30e 100644
41438--- a/fs/befs/linuxvfs.c
41439+++ b/fs/befs/linuxvfs.c
41440@@ -502,7 +502,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41441 {
41442 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
41443 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
41444- char *link = nd_get_link(nd);
41445+ const char *link = nd_get_link(nd);
41446 if (!IS_ERR(link))
41447 kfree(link);
41448 }
41449diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
41450index d146e18..12d1bd1 100644
41451--- a/fs/binfmt_aout.c
41452+++ b/fs/binfmt_aout.c
41453@@ -16,6 +16,7 @@
41454 #include <linux/string.h>
41455 #include <linux/fs.h>
41456 #include <linux/file.h>
41457+#include <linux/security.h>
41458 #include <linux/stat.h>
41459 #include <linux/fcntl.h>
41460 #include <linux/ptrace.h>
41461@@ -83,6 +84,8 @@ static int aout_core_dump(struct coredump_params *cprm)
41462 #endif
41463 # define START_STACK(u) ((void __user *)u.start_stack)
41464
41465+ memset(&dump, 0, sizeof(dump));
41466+
41467 fs = get_fs();
41468 set_fs(KERNEL_DS);
41469 has_dumped = 1;
41470@@ -94,10 +97,12 @@ static int aout_core_dump(struct coredump_params *cprm)
41471
41472 /* If the size of the dump file exceeds the rlimit, then see what would happen
41473 if we wrote the stack, but not the data area. */
41474+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
41475 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
41476 dump.u_dsize = 0;
41477
41478 /* Make sure we have enough room to write the stack and data areas. */
41479+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
41480 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
41481 dump.u_ssize = 0;
41482
41483@@ -231,6 +236,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41484 rlim = rlimit(RLIMIT_DATA);
41485 if (rlim >= RLIM_INFINITY)
41486 rlim = ~0;
41487+
41488+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
41489 if (ex.a_data + ex.a_bss > rlim)
41490 return -ENOMEM;
41491
41492@@ -265,6 +272,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41493
41494 install_exec_creds(bprm);
41495
41496+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41497+ current->mm->pax_flags = 0UL;
41498+#endif
41499+
41500+#ifdef CONFIG_PAX_PAGEEXEC
41501+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
41502+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
41503+
41504+#ifdef CONFIG_PAX_EMUTRAMP
41505+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
41506+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
41507+#endif
41508+
41509+#ifdef CONFIG_PAX_MPROTECT
41510+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
41511+ current->mm->pax_flags |= MF_PAX_MPROTECT;
41512+#endif
41513+
41514+ }
41515+#endif
41516+
41517 if (N_MAGIC(ex) == OMAGIC) {
41518 unsigned long text_addr, map_size;
41519 loff_t pos;
41520@@ -330,7 +358,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41521 }
41522
41523 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
41524- PROT_READ | PROT_WRITE | PROT_EXEC,
41525+ PROT_READ | PROT_WRITE,
41526 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
41527 fd_offset + ex.a_text);
41528 if (error != N_DATADDR(ex)) {
41529diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
41530index 16f7354..185d8dc 100644
41531--- a/fs/binfmt_elf.c
41532+++ b/fs/binfmt_elf.c
41533@@ -32,6 +32,7 @@
41534 #include <linux/elf.h>
41535 #include <linux/utsname.h>
41536 #include <linux/coredump.h>
41537+#include <linux/xattr.h>
41538 #include <asm/uaccess.h>
41539 #include <asm/param.h>
41540 #include <asm/page.h>
41541@@ -52,6 +53,10 @@ static int elf_core_dump(struct coredump_params *cprm);
41542 #define elf_core_dump NULL
41543 #endif
41544
41545+#ifdef CONFIG_PAX_MPROTECT
41546+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
41547+#endif
41548+
41549 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
41550 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
41551 #else
41552@@ -71,6 +76,11 @@ static struct linux_binfmt elf_format = {
41553 .load_binary = load_elf_binary,
41554 .load_shlib = load_elf_library,
41555 .core_dump = elf_core_dump,
41556+
41557+#ifdef CONFIG_PAX_MPROTECT
41558+ .handle_mprotect= elf_handle_mprotect,
41559+#endif
41560+
41561 .min_coredump = ELF_EXEC_PAGESIZE,
41562 };
41563
41564@@ -78,6 +88,8 @@ static struct linux_binfmt elf_format = {
41565
41566 static int set_brk(unsigned long start, unsigned long end)
41567 {
41568+ unsigned long e = end;
41569+
41570 start = ELF_PAGEALIGN(start);
41571 end = ELF_PAGEALIGN(end);
41572 if (end > start) {
41573@@ -86,7 +98,7 @@ static int set_brk(unsigned long start, unsigned long end)
41574 if (BAD_ADDR(addr))
41575 return addr;
41576 }
41577- current->mm->start_brk = current->mm->brk = end;
41578+ current->mm->start_brk = current->mm->brk = e;
41579 return 0;
41580 }
41581
41582@@ -147,12 +159,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41583 elf_addr_t __user *u_rand_bytes;
41584 const char *k_platform = ELF_PLATFORM;
41585 const char *k_base_platform = ELF_BASE_PLATFORM;
41586- unsigned char k_rand_bytes[16];
41587+ u32 k_rand_bytes[4];
41588 int items;
41589 elf_addr_t *elf_info;
41590 int ei_index = 0;
41591 const struct cred *cred = current_cred();
41592 struct vm_area_struct *vma;
41593+ unsigned long saved_auxv[AT_VECTOR_SIZE];
41594
41595 /*
41596 * In some cases (e.g. Hyper-Threading), we want to avoid L1
41597@@ -194,8 +207,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41598 * Generate 16 random bytes for userspace PRNG seeding.
41599 */
41600 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
41601- u_rand_bytes = (elf_addr_t __user *)
41602- STACK_ALLOC(p, sizeof(k_rand_bytes));
41603+ srandom32(k_rand_bytes[0] ^ random32());
41604+ srandom32(k_rand_bytes[1] ^ random32());
41605+ srandom32(k_rand_bytes[2] ^ random32());
41606+ srandom32(k_rand_bytes[3] ^ random32());
41607+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
41608+ u_rand_bytes = (elf_addr_t __user *) p;
41609 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
41610 return -EFAULT;
41611
41612@@ -307,9 +324,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41613 return -EFAULT;
41614 current->mm->env_end = p;
41615
41616+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
41617+
41618 /* Put the elf_info on the stack in the right place. */
41619 sp = (elf_addr_t __user *)envp + 1;
41620- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
41621+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
41622 return -EFAULT;
41623 return 0;
41624 }
41625@@ -380,10 +399,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41626 {
41627 struct elf_phdr *elf_phdata;
41628 struct elf_phdr *eppnt;
41629- unsigned long load_addr = 0;
41630+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
41631 int load_addr_set = 0;
41632 unsigned long last_bss = 0, elf_bss = 0;
41633- unsigned long error = ~0UL;
41634+ unsigned long error = -EINVAL;
41635 unsigned long total_size;
41636 int retval, i, size;
41637
41638@@ -429,6 +448,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41639 goto out_close;
41640 }
41641
41642+#ifdef CONFIG_PAX_SEGMEXEC
41643+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
41644+ pax_task_size = SEGMEXEC_TASK_SIZE;
41645+#endif
41646+
41647 eppnt = elf_phdata;
41648 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
41649 if (eppnt->p_type == PT_LOAD) {
41650@@ -472,8 +496,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41651 k = load_addr + eppnt->p_vaddr;
41652 if (BAD_ADDR(k) ||
41653 eppnt->p_filesz > eppnt->p_memsz ||
41654- eppnt->p_memsz > TASK_SIZE ||
41655- TASK_SIZE - eppnt->p_memsz < k) {
41656+ eppnt->p_memsz > pax_task_size ||
41657+ pax_task_size - eppnt->p_memsz < k) {
41658 error = -ENOMEM;
41659 goto out_close;
41660 }
41661@@ -525,6 +549,351 @@ out:
41662 return error;
41663 }
41664
41665+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
41666+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
41667+{
41668+ unsigned long pax_flags = 0UL;
41669+
41670+#ifdef CONFIG_PAX_PT_PAX_FLAGS
41671+
41672+#ifdef CONFIG_PAX_PAGEEXEC
41673+ if (elf_phdata->p_flags & PF_PAGEEXEC)
41674+ pax_flags |= MF_PAX_PAGEEXEC;
41675+#endif
41676+
41677+#ifdef CONFIG_PAX_SEGMEXEC
41678+ if (elf_phdata->p_flags & PF_SEGMEXEC)
41679+ pax_flags |= MF_PAX_SEGMEXEC;
41680+#endif
41681+
41682+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41683+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41684+ if ((__supported_pte_mask & _PAGE_NX))
41685+ pax_flags &= ~MF_PAX_SEGMEXEC;
41686+ else
41687+ pax_flags &= ~MF_PAX_PAGEEXEC;
41688+ }
41689+#endif
41690+
41691+#ifdef CONFIG_PAX_EMUTRAMP
41692+ if (elf_phdata->p_flags & PF_EMUTRAMP)
41693+ pax_flags |= MF_PAX_EMUTRAMP;
41694+#endif
41695+
41696+#ifdef CONFIG_PAX_MPROTECT
41697+ if (elf_phdata->p_flags & PF_MPROTECT)
41698+ pax_flags |= MF_PAX_MPROTECT;
41699+#endif
41700+
41701+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41702+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
41703+ pax_flags |= MF_PAX_RANDMMAP;
41704+#endif
41705+
41706+#endif
41707+
41708+ return pax_flags;
41709+}
41710+
41711+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
41712+{
41713+ unsigned long pax_flags = 0UL;
41714+
41715+#ifdef CONFIG_PAX_PT_PAX_FLAGS
41716+
41717+#ifdef CONFIG_PAX_PAGEEXEC
41718+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
41719+ pax_flags |= MF_PAX_PAGEEXEC;
41720+#endif
41721+
41722+#ifdef CONFIG_PAX_SEGMEXEC
41723+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
41724+ pax_flags |= MF_PAX_SEGMEXEC;
41725+#endif
41726+
41727+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41728+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41729+ if ((__supported_pte_mask & _PAGE_NX))
41730+ pax_flags &= ~MF_PAX_SEGMEXEC;
41731+ else
41732+ pax_flags &= ~MF_PAX_PAGEEXEC;
41733+ }
41734+#endif
41735+
41736+#ifdef CONFIG_PAX_EMUTRAMP
41737+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
41738+ pax_flags |= MF_PAX_EMUTRAMP;
41739+#endif
41740+
41741+#ifdef CONFIG_PAX_MPROTECT
41742+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
41743+ pax_flags |= MF_PAX_MPROTECT;
41744+#endif
41745+
41746+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41747+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
41748+ pax_flags |= MF_PAX_RANDMMAP;
41749+#endif
41750+
41751+#endif
41752+
41753+ return pax_flags;
41754+}
41755+
41756+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
41757+{
41758+ unsigned long pax_flags = 0UL;
41759+
41760+#ifdef CONFIG_PAX_EI_PAX
41761+
41762+#ifdef CONFIG_PAX_PAGEEXEC
41763+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
41764+ pax_flags |= MF_PAX_PAGEEXEC;
41765+#endif
41766+
41767+#ifdef CONFIG_PAX_SEGMEXEC
41768+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
41769+ pax_flags |= MF_PAX_SEGMEXEC;
41770+#endif
41771+
41772+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41773+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41774+ if ((__supported_pte_mask & _PAGE_NX))
41775+ pax_flags &= ~MF_PAX_SEGMEXEC;
41776+ else
41777+ pax_flags &= ~MF_PAX_PAGEEXEC;
41778+ }
41779+#endif
41780+
41781+#ifdef CONFIG_PAX_EMUTRAMP
41782+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
41783+ pax_flags |= MF_PAX_EMUTRAMP;
41784+#endif
41785+
41786+#ifdef CONFIG_PAX_MPROTECT
41787+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
41788+ pax_flags |= MF_PAX_MPROTECT;
41789+#endif
41790+
41791+#ifdef CONFIG_PAX_ASLR
41792+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
41793+ pax_flags |= MF_PAX_RANDMMAP;
41794+#endif
41795+
41796+#else
41797+
41798+#ifdef CONFIG_PAX_PAGEEXEC
41799+ pax_flags |= MF_PAX_PAGEEXEC;
41800+#endif
41801+
41802+#ifdef CONFIG_PAX_MPROTECT
41803+ pax_flags |= MF_PAX_MPROTECT;
41804+#endif
41805+
41806+#ifdef CONFIG_PAX_RANDMMAP
41807+ pax_flags |= MF_PAX_RANDMMAP;
41808+#endif
41809+
41810+#ifdef CONFIG_PAX_SEGMEXEC
41811+ if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
41812+ pax_flags &= ~MF_PAX_PAGEEXEC;
41813+ pax_flags |= MF_PAX_SEGMEXEC;
41814+ }
41815+#endif
41816+
41817+#endif
41818+
41819+ return pax_flags;
41820+}
41821+
41822+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
41823+{
41824+
41825+#ifdef CONFIG_PAX_PT_PAX_FLAGS
41826+ unsigned long i;
41827+
41828+ for (i = 0UL; i < elf_ex->e_phnum; i++)
41829+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
41830+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
41831+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
41832+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
41833+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
41834+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
41835+ return ~0UL;
41836+
41837+#ifdef CONFIG_PAX_SOFTMODE
41838+ if (pax_softmode)
41839+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
41840+ else
41841+#endif
41842+
41843+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
41844+ break;
41845+ }
41846+#endif
41847+
41848+ return ~0UL;
41849+}
41850+
41851+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
41852+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
41853+{
41854+ unsigned long pax_flags = 0UL;
41855+
41856+#ifdef CONFIG_PAX_PAGEEXEC
41857+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
41858+ pax_flags |= MF_PAX_PAGEEXEC;
41859+#endif
41860+
41861+#ifdef CONFIG_PAX_SEGMEXEC
41862+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
41863+ pax_flags |= MF_PAX_SEGMEXEC;
41864+#endif
41865+
41866+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41867+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41868+ if ((__supported_pte_mask & _PAGE_NX))
41869+ pax_flags &= ~MF_PAX_SEGMEXEC;
41870+ else
41871+ pax_flags &= ~MF_PAX_PAGEEXEC;
41872+ }
41873+#endif
41874+
41875+#ifdef CONFIG_PAX_EMUTRAMP
41876+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
41877+ pax_flags |= MF_PAX_EMUTRAMP;
41878+#endif
41879+
41880+#ifdef CONFIG_PAX_MPROTECT
41881+ if (pax_flags_softmode & MF_PAX_MPROTECT)
41882+ pax_flags |= MF_PAX_MPROTECT;
41883+#endif
41884+
41885+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41886+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
41887+ pax_flags |= MF_PAX_RANDMMAP;
41888+#endif
41889+
41890+ return pax_flags;
41891+}
41892+
41893+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
41894+{
41895+ unsigned long pax_flags = 0UL;
41896+
41897+#ifdef CONFIG_PAX_PAGEEXEC
41898+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
41899+ pax_flags |= MF_PAX_PAGEEXEC;
41900+#endif
41901+
41902+#ifdef CONFIG_PAX_SEGMEXEC
41903+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
41904+ pax_flags |= MF_PAX_SEGMEXEC;
41905+#endif
41906+
41907+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41908+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41909+ if ((__supported_pte_mask & _PAGE_NX))
41910+ pax_flags &= ~MF_PAX_SEGMEXEC;
41911+ else
41912+ pax_flags &= ~MF_PAX_PAGEEXEC;
41913+ }
41914+#endif
41915+
41916+#ifdef CONFIG_PAX_EMUTRAMP
41917+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
41918+ pax_flags |= MF_PAX_EMUTRAMP;
41919+#endif
41920+
41921+#ifdef CONFIG_PAX_MPROTECT
41922+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
41923+ pax_flags |= MF_PAX_MPROTECT;
41924+#endif
41925+
41926+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41927+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
41928+ pax_flags |= MF_PAX_RANDMMAP;
41929+#endif
41930+
41931+ return pax_flags;
41932+}
41933+#endif
41934+
41935+static unsigned long pax_parse_xattr_pax(struct file * const file)
41936+{
41937+
41938+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
41939+ ssize_t xattr_size, i;
41940+ unsigned char xattr_value[5];
41941+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
41942+
41943+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
41944+ if (xattr_size <= 0)
41945+ return ~0UL;
41946+
41947+ for (i = 0; i < xattr_size; i++)
41948+ switch (xattr_value[i]) {
41949+ default:
41950+ return ~0UL;
41951+
41952+#define parse_flag(option1, option2, flag) \
41953+ case option1: \
41954+ pax_flags_hardmode |= MF_PAX_##flag; \
41955+ break; \
41956+ case option2: \
41957+ pax_flags_softmode |= MF_PAX_##flag; \
41958+ break;
41959+
41960+ parse_flag('p', 'P', PAGEEXEC);
41961+ parse_flag('e', 'E', EMUTRAMP);
41962+ parse_flag('m', 'M', MPROTECT);
41963+ parse_flag('r', 'R', RANDMMAP);
41964+ parse_flag('s', 'S', SEGMEXEC);
41965+
41966+#undef parse_flag
41967+ }
41968+
41969+ if (pax_flags_hardmode & pax_flags_softmode)
41970+ return ~0UL;
41971+
41972+#ifdef CONFIG_PAX_SOFTMODE
41973+ if (pax_softmode)
41974+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
41975+ else
41976+#endif
41977+
41978+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
41979+#else
41980+ return ~0UL;
41981+#endif
41982+
41983+}
41984+
41985+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
41986+{
41987+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
41988+
41989+ pax_flags = pax_parse_ei_pax(elf_ex);
41990+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
41991+ xattr_pax_flags = pax_parse_xattr_pax(file);
41992+
41993+ if (pt_pax_flags == ~0UL)
41994+ pt_pax_flags = xattr_pax_flags;
41995+ else if (xattr_pax_flags == ~0UL)
41996+ xattr_pax_flags = pt_pax_flags;
41997+ if (pt_pax_flags != xattr_pax_flags)
41998+ return -EINVAL;
41999+ if (pt_pax_flags != ~0UL)
42000+ pax_flags = pt_pax_flags;
42001+
42002+ if (0 > pax_check_flags(&pax_flags))
42003+ return -EINVAL;
42004+
42005+ current->mm->pax_flags = pax_flags;
42006+ return 0;
42007+}
42008+#endif
42009+
42010 /*
42011 * These are the functions used to load ELF style executables and shared
42012 * libraries. There is no binary dependent code anywhere else.
42013@@ -541,6 +910,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
42014 {
42015 unsigned int random_variable = 0;
42016
42017+#ifdef CONFIG_PAX_RANDUSTACK
42018+ if (randomize_va_space)
42019+ return stack_top - current->mm->delta_stack;
42020+#endif
42021+
42022 if ((current->flags & PF_RANDOMIZE) &&
42023 !(current->personality & ADDR_NO_RANDOMIZE)) {
42024 random_variable = get_random_int() & STACK_RND_MASK;
42025@@ -559,7 +933,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42026 unsigned long load_addr = 0, load_bias = 0;
42027 int load_addr_set = 0;
42028 char * elf_interpreter = NULL;
42029- unsigned long error;
42030+ unsigned long error = 0;
42031 struct elf_phdr *elf_ppnt, *elf_phdata;
42032 unsigned long elf_bss, elf_brk;
42033 int retval, i;
42034@@ -569,11 +943,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42035 unsigned long start_code, end_code, start_data, end_data;
42036 unsigned long reloc_func_desc __maybe_unused = 0;
42037 int executable_stack = EXSTACK_DEFAULT;
42038- unsigned long def_flags = 0;
42039 struct {
42040 struct elfhdr elf_ex;
42041 struct elfhdr interp_elf_ex;
42042 } *loc;
42043+ unsigned long pax_task_size = TASK_SIZE;
42044
42045 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
42046 if (!loc) {
42047@@ -709,11 +1083,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42048 goto out_free_dentry;
42049
42050 /* OK, This is the point of no return */
42051- current->mm->def_flags = def_flags;
42052+
42053+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42054+ current->mm->pax_flags = 0UL;
42055+#endif
42056+
42057+#ifdef CONFIG_PAX_DLRESOLVE
42058+ current->mm->call_dl_resolve = 0UL;
42059+#endif
42060+
42061+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
42062+ current->mm->call_syscall = 0UL;
42063+#endif
42064+
42065+#ifdef CONFIG_PAX_ASLR
42066+ current->mm->delta_mmap = 0UL;
42067+ current->mm->delta_stack = 0UL;
42068+#endif
42069+
42070+ current->mm->def_flags = 0;
42071+
42072+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
42073+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
42074+ send_sig(SIGKILL, current, 0);
42075+ goto out_free_dentry;
42076+ }
42077+#endif
42078+
42079+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
42080+ pax_set_initial_flags(bprm);
42081+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
42082+ if (pax_set_initial_flags_func)
42083+ (pax_set_initial_flags_func)(bprm);
42084+#endif
42085+
42086+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42087+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
42088+ current->mm->context.user_cs_limit = PAGE_SIZE;
42089+ current->mm->def_flags |= VM_PAGEEXEC;
42090+ }
42091+#endif
42092+
42093+#ifdef CONFIG_PAX_SEGMEXEC
42094+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
42095+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
42096+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
42097+ pax_task_size = SEGMEXEC_TASK_SIZE;
42098+ current->mm->def_flags |= VM_NOHUGEPAGE;
42099+ }
42100+#endif
42101+
42102+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
42103+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42104+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
42105+ put_cpu();
42106+ }
42107+#endif
42108
42109 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
42110 may depend on the personality. */
42111 SET_PERSONALITY(loc->elf_ex);
42112+
42113+#ifdef CONFIG_PAX_ASLR
42114+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42115+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
42116+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
42117+ }
42118+#endif
42119+
42120+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42121+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42122+ executable_stack = EXSTACK_DISABLE_X;
42123+ current->personality &= ~READ_IMPLIES_EXEC;
42124+ } else
42125+#endif
42126+
42127 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
42128 current->personality |= READ_IMPLIES_EXEC;
42129
42130@@ -804,6 +1248,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42131 #else
42132 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
42133 #endif
42134+
42135+#ifdef CONFIG_PAX_RANDMMAP
42136+ /* PaX: randomize base address at the default exe base if requested */
42137+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
42138+#ifdef CONFIG_SPARC64
42139+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
42140+#else
42141+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
42142+#endif
42143+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
42144+ elf_flags |= MAP_FIXED;
42145+ }
42146+#endif
42147+
42148 }
42149
42150 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
42151@@ -836,9 +1294,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42152 * allowed task size. Note that p_filesz must always be
42153 * <= p_memsz so it is only necessary to check p_memsz.
42154 */
42155- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42156- elf_ppnt->p_memsz > TASK_SIZE ||
42157- TASK_SIZE - elf_ppnt->p_memsz < k) {
42158+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42159+ elf_ppnt->p_memsz > pax_task_size ||
42160+ pax_task_size - elf_ppnt->p_memsz < k) {
42161 /* set_brk can never work. Avoid overflows. */
42162 send_sig(SIGKILL, current, 0);
42163 retval = -EINVAL;
42164@@ -877,11 +1335,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42165 goto out_free_dentry;
42166 }
42167 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
42168- send_sig(SIGSEGV, current, 0);
42169- retval = -EFAULT; /* Nobody gets to see this, but.. */
42170- goto out_free_dentry;
42171+ /*
42172+ * This bss-zeroing can fail if the ELF
42173+ * file specifies odd protections. So
42174+ * we don't check the return value
42175+ */
42176 }
42177
42178+#ifdef CONFIG_PAX_RANDMMAP
42179+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42180+ unsigned long start, size;
42181+
42182+ start = ELF_PAGEALIGN(elf_brk);
42183+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
42184+ down_write(&current->mm->mmap_sem);
42185+ retval = -ENOMEM;
42186+ if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
42187+ unsigned long prot = PROT_NONE;
42188+
42189+ current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
42190+// if (current->personality & ADDR_NO_RANDOMIZE)
42191+// prot = PROT_READ;
42192+ start = do_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
42193+ retval = IS_ERR_VALUE(start) ? start : 0;
42194+ }
42195+ up_write(&current->mm->mmap_sem);
42196+ if (retval == 0)
42197+ retval = set_brk(start + size, start + size + PAGE_SIZE);
42198+ if (retval < 0) {
42199+ send_sig(SIGKILL, current, 0);
42200+ goto out_free_dentry;
42201+ }
42202+ }
42203+#endif
42204+
42205 if (elf_interpreter) {
42206 unsigned long uninitialized_var(interp_map_addr);
42207
42208@@ -1109,7 +1596,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
42209 * Decide what to dump of a segment, part, all or none.
42210 */
42211 static unsigned long vma_dump_size(struct vm_area_struct *vma,
42212- unsigned long mm_flags)
42213+ unsigned long mm_flags, long signr)
42214 {
42215 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
42216
42217@@ -1146,7 +1633,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
42218 if (vma->vm_file == NULL)
42219 return 0;
42220
42221- if (FILTER(MAPPED_PRIVATE))
42222+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
42223 goto whole;
42224
42225 /*
42226@@ -1368,9 +1855,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
42227 {
42228 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
42229 int i = 0;
42230- do
42231+ do {
42232 i += 2;
42233- while (auxv[i - 2] != AT_NULL);
42234+ } while (auxv[i - 2] != AT_NULL);
42235 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
42236 }
42237
42238@@ -1892,14 +2379,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
42239 }
42240
42241 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
42242- unsigned long mm_flags)
42243+ struct coredump_params *cprm)
42244 {
42245 struct vm_area_struct *vma;
42246 size_t size = 0;
42247
42248 for (vma = first_vma(current, gate_vma); vma != NULL;
42249 vma = next_vma(vma, gate_vma))
42250- size += vma_dump_size(vma, mm_flags);
42251+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42252 return size;
42253 }
42254
42255@@ -1993,7 +2480,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42256
42257 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
42258
42259- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
42260+ offset += elf_core_vma_data_size(gate_vma, cprm);
42261 offset += elf_core_extra_data_size();
42262 e_shoff = offset;
42263
42264@@ -2007,10 +2494,12 @@ static int elf_core_dump(struct coredump_params *cprm)
42265 offset = dataoff;
42266
42267 size += sizeof(*elf);
42268+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42269 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
42270 goto end_coredump;
42271
42272 size += sizeof(*phdr4note);
42273+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42274 if (size > cprm->limit
42275 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
42276 goto end_coredump;
42277@@ -2024,7 +2513,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42278 phdr.p_offset = offset;
42279 phdr.p_vaddr = vma->vm_start;
42280 phdr.p_paddr = 0;
42281- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
42282+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42283 phdr.p_memsz = vma->vm_end - vma->vm_start;
42284 offset += phdr.p_filesz;
42285 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
42286@@ -2035,6 +2524,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42287 phdr.p_align = ELF_EXEC_PAGESIZE;
42288
42289 size += sizeof(phdr);
42290+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42291 if (size > cprm->limit
42292 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
42293 goto end_coredump;
42294@@ -2059,7 +2549,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42295 unsigned long addr;
42296 unsigned long end;
42297
42298- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
42299+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42300
42301 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
42302 struct page *page;
42303@@ -2068,6 +2558,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42304 page = get_dump_page(addr);
42305 if (page) {
42306 void *kaddr = kmap(page);
42307+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
42308 stop = ((size += PAGE_SIZE) > cprm->limit) ||
42309 !dump_write(cprm->file, kaddr,
42310 PAGE_SIZE);
42311@@ -2085,6 +2576,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42312
42313 if (e_phnum == PN_XNUM) {
42314 size += sizeof(*shdr4extnum);
42315+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42316 if (size > cprm->limit
42317 || !dump_write(cprm->file, shdr4extnum,
42318 sizeof(*shdr4extnum)))
42319@@ -2105,6 +2597,97 @@ out:
42320
42321 #endif /* CONFIG_ELF_CORE */
42322
42323+#ifdef CONFIG_PAX_MPROTECT
42324+/* PaX: non-PIC ELF libraries need relocations on their executable segments
42325+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
42326+ * we'll remove VM_MAYWRITE for good on RELRO segments.
42327+ *
42328+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
42329+ * basis because we want to allow the common case and not the special ones.
42330+ */
42331+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
42332+{
42333+ struct elfhdr elf_h;
42334+ struct elf_phdr elf_p;
42335+ unsigned long i;
42336+ unsigned long oldflags;
42337+ bool is_textrel_rw, is_textrel_rx, is_relro;
42338+
42339+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
42340+ return;
42341+
42342+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
42343+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
42344+
42345+#ifdef CONFIG_PAX_ELFRELOCS
42346+ /* possible TEXTREL */
42347+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
42348+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
42349+#else
42350+ is_textrel_rw = false;
42351+ is_textrel_rx = false;
42352+#endif
42353+
42354+ /* possible RELRO */
42355+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
42356+
42357+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
42358+ return;
42359+
42360+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
42361+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
42362+
42363+#ifdef CONFIG_PAX_ETEXECRELOCS
42364+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42365+#else
42366+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
42367+#endif
42368+
42369+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42370+ !elf_check_arch(&elf_h) ||
42371+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
42372+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
42373+ return;
42374+
42375+ for (i = 0UL; i < elf_h.e_phnum; i++) {
42376+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
42377+ return;
42378+ switch (elf_p.p_type) {
42379+ case PT_DYNAMIC:
42380+ if (!is_textrel_rw && !is_textrel_rx)
42381+ continue;
42382+ i = 0UL;
42383+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
42384+ elf_dyn dyn;
42385+
42386+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
42387+ return;
42388+ if (dyn.d_tag == DT_NULL)
42389+ return;
42390+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
42391+ gr_log_textrel(vma);
42392+ if (is_textrel_rw)
42393+ vma->vm_flags |= VM_MAYWRITE;
42394+ else
42395+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
42396+ vma->vm_flags &= ~VM_MAYWRITE;
42397+ return;
42398+ }
42399+ i++;
42400+ }
42401+ return;
42402+
42403+ case PT_GNU_RELRO:
42404+ if (!is_relro)
42405+ continue;
42406+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
42407+ vma->vm_flags &= ~VM_MAYWRITE;
42408+ return;
42409+ }
42410+ }
42411+}
42412+#endif
42413+
42414 static int __init init_elf_binfmt(void)
42415 {
42416 register_binfmt(&elf_format);
42417diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
42418index 6b2daf9..a70dccb 100644
42419--- a/fs/binfmt_flat.c
42420+++ b/fs/binfmt_flat.c
42421@@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
42422 realdatastart = (unsigned long) -ENOMEM;
42423 printk("Unable to allocate RAM for process data, errno %d\n",
42424 (int)-realdatastart);
42425+ down_write(&current->mm->mmap_sem);
42426 do_munmap(current->mm, textpos, text_len);
42427+ up_write(&current->mm->mmap_sem);
42428 ret = realdatastart;
42429 goto err;
42430 }
42431@@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42432 }
42433 if (IS_ERR_VALUE(result)) {
42434 printk("Unable to read data+bss, errno %d\n", (int)-result);
42435+ down_write(&current->mm->mmap_sem);
42436 do_munmap(current->mm, textpos, text_len);
42437 do_munmap(current->mm, realdatastart, len);
42438+ up_write(&current->mm->mmap_sem);
42439 ret = result;
42440 goto err;
42441 }
42442@@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42443 }
42444 if (IS_ERR_VALUE(result)) {
42445 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
42446+ down_write(&current->mm->mmap_sem);
42447 do_munmap(current->mm, textpos, text_len + data_len + extra +
42448 MAX_SHARED_LIBS * sizeof(unsigned long));
42449+ up_write(&current->mm->mmap_sem);
42450 ret = result;
42451 goto err;
42452 }
42453diff --git a/fs/bio.c b/fs/bio.c
42454index 84da885..2149cd9 100644
42455--- a/fs/bio.c
42456+++ b/fs/bio.c
42457@@ -838,7 +838,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
42458 /*
42459 * Overflow, abort
42460 */
42461- if (end < start)
42462+ if (end < start || end - start > INT_MAX - nr_pages)
42463 return ERR_PTR(-EINVAL);
42464
42465 nr_pages += end - start;
42466@@ -1234,7 +1234,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
42467 const int read = bio_data_dir(bio) == READ;
42468 struct bio_map_data *bmd = bio->bi_private;
42469 int i;
42470- char *p = bmd->sgvecs[0].iov_base;
42471+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
42472
42473 __bio_for_each_segment(bvec, bio, i, 0) {
42474 char *addr = page_address(bvec->bv_page);
42475diff --git a/fs/block_dev.c b/fs/block_dev.c
42476index ba11c30..623d736 100644
42477--- a/fs/block_dev.c
42478+++ b/fs/block_dev.c
42479@@ -704,7 +704,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
42480 else if (bdev->bd_contains == bdev)
42481 return true; /* is a whole device which isn't held */
42482
42483- else if (whole->bd_holder == bd_may_claim)
42484+ else if (whole->bd_holder == (void *)bd_may_claim)
42485 return true; /* is a partition of a device that is being partitioned */
42486 else if (whole->bd_holder != NULL)
42487 return false; /* is a partition of a held device */
42488diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
42489index c053e90..e5f1afc 100644
42490--- a/fs/btrfs/check-integrity.c
42491+++ b/fs/btrfs/check-integrity.c
42492@@ -156,7 +156,7 @@ struct btrfsic_block {
42493 union {
42494 bio_end_io_t *bio;
42495 bh_end_io_t *bh;
42496- } orig_bio_bh_end_io;
42497+ } __no_const orig_bio_bh_end_io;
42498 int submit_bio_bh_rw;
42499 u64 flush_gen; /* only valid if !never_written */
42500 };
42501diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
42502index 4106264..8157ede 100644
42503--- a/fs/btrfs/ctree.c
42504+++ b/fs/btrfs/ctree.c
42505@@ -513,9 +513,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
42506 free_extent_buffer(buf);
42507 add_root_to_dirty_list(root);
42508 } else {
42509- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
42510- parent_start = parent->start;
42511- else
42512+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
42513+ if (parent)
42514+ parent_start = parent->start;
42515+ else
42516+ parent_start = 0;
42517+ } else
42518 parent_start = 0;
42519
42520 WARN_ON(trans->transid != btrfs_header_generation(parent));
42521diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
42522index 0df0d1f..4bdcbfe 100644
42523--- a/fs/btrfs/inode.c
42524+++ b/fs/btrfs/inode.c
42525@@ -7074,7 +7074,7 @@ fail:
42526 return -ENOMEM;
42527 }
42528
42529-static int btrfs_getattr(struct vfsmount *mnt,
42530+int btrfs_getattr(struct vfsmount *mnt,
42531 struct dentry *dentry, struct kstat *stat)
42532 {
42533 struct inode *inode = dentry->d_inode;
42534@@ -7088,6 +7088,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
42535 return 0;
42536 }
42537
42538+EXPORT_SYMBOL(btrfs_getattr);
42539+
42540+dev_t get_btrfs_dev_from_inode(struct inode *inode)
42541+{
42542+ return BTRFS_I(inode)->root->anon_dev;
42543+}
42544+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
42545+
42546 /*
42547 * If a file is moved, it will inherit the cow and compression flags of the new
42548 * directory.
42549diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
42550index 14f8e1f..ab8d81f 100644
42551--- a/fs/btrfs/ioctl.c
42552+++ b/fs/btrfs/ioctl.c
42553@@ -2882,9 +2882,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42554 for (i = 0; i < num_types; i++) {
42555 struct btrfs_space_info *tmp;
42556
42557+ /* Don't copy in more than we allocated */
42558 if (!slot_count)
42559 break;
42560
42561+ slot_count--;
42562+
42563 info = NULL;
42564 rcu_read_lock();
42565 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
42566@@ -2906,15 +2909,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42567 memcpy(dest, &space, sizeof(space));
42568 dest++;
42569 space_args.total_spaces++;
42570- slot_count--;
42571 }
42572- if (!slot_count)
42573- break;
42574 }
42575 up_read(&info->groups_sem);
42576 }
42577
42578- user_dest = (struct btrfs_ioctl_space_info *)
42579+ user_dest = (struct btrfs_ioctl_space_info __user *)
42580 (arg + sizeof(struct btrfs_ioctl_space_args));
42581
42582 if (copy_to_user(user_dest, dest_orig, alloc_size))
42583diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
42584index 646ee21..f020f87 100644
42585--- a/fs/btrfs/relocation.c
42586+++ b/fs/btrfs/relocation.c
42587@@ -1268,7 +1268,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
42588 }
42589 spin_unlock(&rc->reloc_root_tree.lock);
42590
42591- BUG_ON((struct btrfs_root *)node->data != root);
42592+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
42593
42594 if (!del) {
42595 spin_lock(&rc->reloc_root_tree.lock);
42596diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
42597index 622f469..e8d2d55 100644
42598--- a/fs/cachefiles/bind.c
42599+++ b/fs/cachefiles/bind.c
42600@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
42601 args);
42602
42603 /* start by checking things over */
42604- ASSERT(cache->fstop_percent >= 0 &&
42605- cache->fstop_percent < cache->fcull_percent &&
42606+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
42607 cache->fcull_percent < cache->frun_percent &&
42608 cache->frun_percent < 100);
42609
42610- ASSERT(cache->bstop_percent >= 0 &&
42611- cache->bstop_percent < cache->bcull_percent &&
42612+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
42613 cache->bcull_percent < cache->brun_percent &&
42614 cache->brun_percent < 100);
42615
42616diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
42617index 0a1467b..6a53245 100644
42618--- a/fs/cachefiles/daemon.c
42619+++ b/fs/cachefiles/daemon.c
42620@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
42621 if (n > buflen)
42622 return -EMSGSIZE;
42623
42624- if (copy_to_user(_buffer, buffer, n) != 0)
42625+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
42626 return -EFAULT;
42627
42628 return n;
42629@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
42630 if (test_bit(CACHEFILES_DEAD, &cache->flags))
42631 return -EIO;
42632
42633- if (datalen < 0 || datalen > PAGE_SIZE - 1)
42634+ if (datalen > PAGE_SIZE - 1)
42635 return -EOPNOTSUPP;
42636
42637 /* drag the command string into the kernel so we can parse it */
42638@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
42639 if (args[0] != '%' || args[1] != '\0')
42640 return -EINVAL;
42641
42642- if (fstop < 0 || fstop >= cache->fcull_percent)
42643+ if (fstop >= cache->fcull_percent)
42644 return cachefiles_daemon_range_error(cache, args);
42645
42646 cache->fstop_percent = fstop;
42647@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
42648 if (args[0] != '%' || args[1] != '\0')
42649 return -EINVAL;
42650
42651- if (bstop < 0 || bstop >= cache->bcull_percent)
42652+ if (bstop >= cache->bcull_percent)
42653 return cachefiles_daemon_range_error(cache, args);
42654
42655 cache->bstop_percent = bstop;
42656diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
42657index bd6bc1b..b627b53 100644
42658--- a/fs/cachefiles/internal.h
42659+++ b/fs/cachefiles/internal.h
42660@@ -57,7 +57,7 @@ struct cachefiles_cache {
42661 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
42662 struct rb_root active_nodes; /* active nodes (can't be culled) */
42663 rwlock_t active_lock; /* lock for active_nodes */
42664- atomic_t gravecounter; /* graveyard uniquifier */
42665+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
42666 unsigned frun_percent; /* when to stop culling (% files) */
42667 unsigned fcull_percent; /* when to start culling (% files) */
42668 unsigned fstop_percent; /* when to stop allocating (% files) */
42669@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
42670 * proc.c
42671 */
42672 #ifdef CONFIG_CACHEFILES_HISTOGRAM
42673-extern atomic_t cachefiles_lookup_histogram[HZ];
42674-extern atomic_t cachefiles_mkdir_histogram[HZ];
42675-extern atomic_t cachefiles_create_histogram[HZ];
42676+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42677+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42678+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
42679
42680 extern int __init cachefiles_proc_init(void);
42681 extern void cachefiles_proc_cleanup(void);
42682 static inline
42683-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
42684+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
42685 {
42686 unsigned long jif = jiffies - start_jif;
42687 if (jif >= HZ)
42688 jif = HZ - 1;
42689- atomic_inc(&histogram[jif]);
42690+ atomic_inc_unchecked(&histogram[jif]);
42691 }
42692
42693 #else
42694diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
42695index 7f0771d..87d4f36 100644
42696--- a/fs/cachefiles/namei.c
42697+++ b/fs/cachefiles/namei.c
42698@@ -318,7 +318,7 @@ try_again:
42699 /* first step is to make up a grave dentry in the graveyard */
42700 sprintf(nbuffer, "%08x%08x",
42701 (uint32_t) get_seconds(),
42702- (uint32_t) atomic_inc_return(&cache->gravecounter));
42703+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
42704
42705 /* do the multiway lock magic */
42706 trap = lock_rename(cache->graveyard, dir);
42707diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
42708index eccd339..4c1d995 100644
42709--- a/fs/cachefiles/proc.c
42710+++ b/fs/cachefiles/proc.c
42711@@ -14,9 +14,9 @@
42712 #include <linux/seq_file.h>
42713 #include "internal.h"
42714
42715-atomic_t cachefiles_lookup_histogram[HZ];
42716-atomic_t cachefiles_mkdir_histogram[HZ];
42717-atomic_t cachefiles_create_histogram[HZ];
42718+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42719+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42720+atomic_unchecked_t cachefiles_create_histogram[HZ];
42721
42722 /*
42723 * display the latency histogram
42724@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
42725 return 0;
42726 default:
42727 index = (unsigned long) v - 3;
42728- x = atomic_read(&cachefiles_lookup_histogram[index]);
42729- y = atomic_read(&cachefiles_mkdir_histogram[index]);
42730- z = atomic_read(&cachefiles_create_histogram[index]);
42731+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
42732+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
42733+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
42734 if (x == 0 && y == 0 && z == 0)
42735 return 0;
42736
42737diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
42738index 0e3c092..818480e 100644
42739--- a/fs/cachefiles/rdwr.c
42740+++ b/fs/cachefiles/rdwr.c
42741@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
42742 old_fs = get_fs();
42743 set_fs(KERNEL_DS);
42744 ret = file->f_op->write(
42745- file, (const void __user *) data, len, &pos);
42746+ file, (const void __force_user *) data, len, &pos);
42747 set_fs(old_fs);
42748 kunmap(page);
42749 if (ret != len)
42750diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
42751index 3e8094b..cb3ff3d 100644
42752--- a/fs/ceph/dir.c
42753+++ b/fs/ceph/dir.c
42754@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
42755 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
42756 struct ceph_mds_client *mdsc = fsc->mdsc;
42757 unsigned frag = fpos_frag(filp->f_pos);
42758- int off = fpos_off(filp->f_pos);
42759+ unsigned int off = fpos_off(filp->f_pos);
42760 int err;
42761 u32 ftype;
42762 struct ceph_mds_reply_info_parsed *rinfo;
42763@@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
42764 if (nd &&
42765 (nd->flags & LOOKUP_OPEN) &&
42766 !(nd->intent.open.flags & O_CREAT)) {
42767- int mode = nd->intent.open.create_mode & ~current->fs->umask;
42768+ int mode = nd->intent.open.create_mode & ~current_umask();
42769 return ceph_lookup_open(dir, dentry, nd, mode, 1);
42770 }
42771
42772diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
42773index 2704646..c581c91 100644
42774--- a/fs/cifs/cifs_debug.c
42775+++ b/fs/cifs/cifs_debug.c
42776@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42777
42778 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
42779 #ifdef CONFIG_CIFS_STATS2
42780- atomic_set(&totBufAllocCount, 0);
42781- atomic_set(&totSmBufAllocCount, 0);
42782+ atomic_set_unchecked(&totBufAllocCount, 0);
42783+ atomic_set_unchecked(&totSmBufAllocCount, 0);
42784 #endif /* CONFIG_CIFS_STATS2 */
42785 spin_lock(&cifs_tcp_ses_lock);
42786 list_for_each(tmp1, &cifs_tcp_ses_list) {
42787@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42788 tcon = list_entry(tmp3,
42789 struct cifs_tcon,
42790 tcon_list);
42791- atomic_set(&tcon->num_smbs_sent, 0);
42792- atomic_set(&tcon->num_writes, 0);
42793- atomic_set(&tcon->num_reads, 0);
42794- atomic_set(&tcon->num_oplock_brks, 0);
42795- atomic_set(&tcon->num_opens, 0);
42796- atomic_set(&tcon->num_posixopens, 0);
42797- atomic_set(&tcon->num_posixmkdirs, 0);
42798- atomic_set(&tcon->num_closes, 0);
42799- atomic_set(&tcon->num_deletes, 0);
42800- atomic_set(&tcon->num_mkdirs, 0);
42801- atomic_set(&tcon->num_rmdirs, 0);
42802- atomic_set(&tcon->num_renames, 0);
42803- atomic_set(&tcon->num_t2renames, 0);
42804- atomic_set(&tcon->num_ffirst, 0);
42805- atomic_set(&tcon->num_fnext, 0);
42806- atomic_set(&tcon->num_fclose, 0);
42807- atomic_set(&tcon->num_hardlinks, 0);
42808- atomic_set(&tcon->num_symlinks, 0);
42809- atomic_set(&tcon->num_locks, 0);
42810+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
42811+ atomic_set_unchecked(&tcon->num_writes, 0);
42812+ atomic_set_unchecked(&tcon->num_reads, 0);
42813+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
42814+ atomic_set_unchecked(&tcon->num_opens, 0);
42815+ atomic_set_unchecked(&tcon->num_posixopens, 0);
42816+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
42817+ atomic_set_unchecked(&tcon->num_closes, 0);
42818+ atomic_set_unchecked(&tcon->num_deletes, 0);
42819+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
42820+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
42821+ atomic_set_unchecked(&tcon->num_renames, 0);
42822+ atomic_set_unchecked(&tcon->num_t2renames, 0);
42823+ atomic_set_unchecked(&tcon->num_ffirst, 0);
42824+ atomic_set_unchecked(&tcon->num_fnext, 0);
42825+ atomic_set_unchecked(&tcon->num_fclose, 0);
42826+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
42827+ atomic_set_unchecked(&tcon->num_symlinks, 0);
42828+ atomic_set_unchecked(&tcon->num_locks, 0);
42829 }
42830 }
42831 }
42832@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42833 smBufAllocCount.counter, cifs_min_small);
42834 #ifdef CONFIG_CIFS_STATS2
42835 seq_printf(m, "Total Large %d Small %d Allocations\n",
42836- atomic_read(&totBufAllocCount),
42837- atomic_read(&totSmBufAllocCount));
42838+ atomic_read_unchecked(&totBufAllocCount),
42839+ atomic_read_unchecked(&totSmBufAllocCount));
42840 #endif /* CONFIG_CIFS_STATS2 */
42841
42842 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
42843@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42844 if (tcon->need_reconnect)
42845 seq_puts(m, "\tDISCONNECTED ");
42846 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
42847- atomic_read(&tcon->num_smbs_sent),
42848- atomic_read(&tcon->num_oplock_brks));
42849+ atomic_read_unchecked(&tcon->num_smbs_sent),
42850+ atomic_read_unchecked(&tcon->num_oplock_brks));
42851 seq_printf(m, "\nReads: %d Bytes: %lld",
42852- atomic_read(&tcon->num_reads),
42853+ atomic_read_unchecked(&tcon->num_reads),
42854 (long long)(tcon->bytes_read));
42855 seq_printf(m, "\nWrites: %d Bytes: %lld",
42856- atomic_read(&tcon->num_writes),
42857+ atomic_read_unchecked(&tcon->num_writes),
42858 (long long)(tcon->bytes_written));
42859 seq_printf(m, "\nFlushes: %d",
42860- atomic_read(&tcon->num_flushes));
42861+ atomic_read_unchecked(&tcon->num_flushes));
42862 seq_printf(m, "\nLocks: %d HardLinks: %d "
42863 "Symlinks: %d",
42864- atomic_read(&tcon->num_locks),
42865- atomic_read(&tcon->num_hardlinks),
42866- atomic_read(&tcon->num_symlinks));
42867+ atomic_read_unchecked(&tcon->num_locks),
42868+ atomic_read_unchecked(&tcon->num_hardlinks),
42869+ atomic_read_unchecked(&tcon->num_symlinks));
42870 seq_printf(m, "\nOpens: %d Closes: %d "
42871 "Deletes: %d",
42872- atomic_read(&tcon->num_opens),
42873- atomic_read(&tcon->num_closes),
42874- atomic_read(&tcon->num_deletes));
42875+ atomic_read_unchecked(&tcon->num_opens),
42876+ atomic_read_unchecked(&tcon->num_closes),
42877+ atomic_read_unchecked(&tcon->num_deletes));
42878 seq_printf(m, "\nPosix Opens: %d "
42879 "Posix Mkdirs: %d",
42880- atomic_read(&tcon->num_posixopens),
42881- atomic_read(&tcon->num_posixmkdirs));
42882+ atomic_read_unchecked(&tcon->num_posixopens),
42883+ atomic_read_unchecked(&tcon->num_posixmkdirs));
42884 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
42885- atomic_read(&tcon->num_mkdirs),
42886- atomic_read(&tcon->num_rmdirs));
42887+ atomic_read_unchecked(&tcon->num_mkdirs),
42888+ atomic_read_unchecked(&tcon->num_rmdirs));
42889 seq_printf(m, "\nRenames: %d T2 Renames %d",
42890- atomic_read(&tcon->num_renames),
42891- atomic_read(&tcon->num_t2renames));
42892+ atomic_read_unchecked(&tcon->num_renames),
42893+ atomic_read_unchecked(&tcon->num_t2renames));
42894 seq_printf(m, "\nFindFirst: %d FNext %d "
42895 "FClose %d",
42896- atomic_read(&tcon->num_ffirst),
42897- atomic_read(&tcon->num_fnext),
42898- atomic_read(&tcon->num_fclose));
42899+ atomic_read_unchecked(&tcon->num_ffirst),
42900+ atomic_read_unchecked(&tcon->num_fnext),
42901+ atomic_read_unchecked(&tcon->num_fclose));
42902 }
42903 }
42904 }
42905diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
42906index 541ef81..a78deb8 100644
42907--- a/fs/cifs/cifsfs.c
42908+++ b/fs/cifs/cifsfs.c
42909@@ -985,7 +985,7 @@ cifs_init_request_bufs(void)
42910 cifs_req_cachep = kmem_cache_create("cifs_request",
42911 CIFSMaxBufSize +
42912 MAX_CIFS_HDR_SIZE, 0,
42913- SLAB_HWCACHE_ALIGN, NULL);
42914+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
42915 if (cifs_req_cachep == NULL)
42916 return -ENOMEM;
42917
42918@@ -1012,7 +1012,7 @@ cifs_init_request_bufs(void)
42919 efficient to alloc 1 per page off the slab compared to 17K (5page)
42920 alloc of large cifs buffers even when page debugging is on */
42921 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
42922- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
42923+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
42924 NULL);
42925 if (cifs_sm_req_cachep == NULL) {
42926 mempool_destroy(cifs_req_poolp);
42927@@ -1097,8 +1097,8 @@ init_cifs(void)
42928 atomic_set(&bufAllocCount, 0);
42929 atomic_set(&smBufAllocCount, 0);
42930 #ifdef CONFIG_CIFS_STATS2
42931- atomic_set(&totBufAllocCount, 0);
42932- atomic_set(&totSmBufAllocCount, 0);
42933+ atomic_set_unchecked(&totBufAllocCount, 0);
42934+ atomic_set_unchecked(&totSmBufAllocCount, 0);
42935 #endif /* CONFIG_CIFS_STATS2 */
42936
42937 atomic_set(&midCount, 0);
42938diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
42939index 73fea28..b996b84 100644
42940--- a/fs/cifs/cifsglob.h
42941+++ b/fs/cifs/cifsglob.h
42942@@ -439,28 +439,28 @@ struct cifs_tcon {
42943 __u16 Flags; /* optional support bits */
42944 enum statusEnum tidStatus;
42945 #ifdef CONFIG_CIFS_STATS
42946- atomic_t num_smbs_sent;
42947- atomic_t num_writes;
42948- atomic_t num_reads;
42949- atomic_t num_flushes;
42950- atomic_t num_oplock_brks;
42951- atomic_t num_opens;
42952- atomic_t num_closes;
42953- atomic_t num_deletes;
42954- atomic_t num_mkdirs;
42955- atomic_t num_posixopens;
42956- atomic_t num_posixmkdirs;
42957- atomic_t num_rmdirs;
42958- atomic_t num_renames;
42959- atomic_t num_t2renames;
42960- atomic_t num_ffirst;
42961- atomic_t num_fnext;
42962- atomic_t num_fclose;
42963- atomic_t num_hardlinks;
42964- atomic_t num_symlinks;
42965- atomic_t num_locks;
42966- atomic_t num_acl_get;
42967- atomic_t num_acl_set;
42968+ atomic_unchecked_t num_smbs_sent;
42969+ atomic_unchecked_t num_writes;
42970+ atomic_unchecked_t num_reads;
42971+ atomic_unchecked_t num_flushes;
42972+ atomic_unchecked_t num_oplock_brks;
42973+ atomic_unchecked_t num_opens;
42974+ atomic_unchecked_t num_closes;
42975+ atomic_unchecked_t num_deletes;
42976+ atomic_unchecked_t num_mkdirs;
42977+ atomic_unchecked_t num_posixopens;
42978+ atomic_unchecked_t num_posixmkdirs;
42979+ atomic_unchecked_t num_rmdirs;
42980+ atomic_unchecked_t num_renames;
42981+ atomic_unchecked_t num_t2renames;
42982+ atomic_unchecked_t num_ffirst;
42983+ atomic_unchecked_t num_fnext;
42984+ atomic_unchecked_t num_fclose;
42985+ atomic_unchecked_t num_hardlinks;
42986+ atomic_unchecked_t num_symlinks;
42987+ atomic_unchecked_t num_locks;
42988+ atomic_unchecked_t num_acl_get;
42989+ atomic_unchecked_t num_acl_set;
42990 #ifdef CONFIG_CIFS_STATS2
42991 unsigned long long time_writes;
42992 unsigned long long time_reads;
42993@@ -677,7 +677,7 @@ convert_delimiter(char *path, char delim)
42994 }
42995
42996 #ifdef CONFIG_CIFS_STATS
42997-#define cifs_stats_inc atomic_inc
42998+#define cifs_stats_inc atomic_inc_unchecked
42999
43000 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
43001 unsigned int bytes)
43002@@ -1036,8 +1036,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
43003 /* Various Debug counters */
43004 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
43005 #ifdef CONFIG_CIFS_STATS2
43006-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
43007-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
43008+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
43009+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
43010 #endif
43011 GLOBAL_EXTERN atomic_t smBufAllocCount;
43012 GLOBAL_EXTERN atomic_t midCount;
43013diff --git a/fs/cifs/link.c b/fs/cifs/link.c
43014index 6b0e064..94e6c3c 100644
43015--- a/fs/cifs/link.c
43016+++ b/fs/cifs/link.c
43017@@ -600,7 +600,7 @@ symlink_exit:
43018
43019 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
43020 {
43021- char *p = nd_get_link(nd);
43022+ const char *p = nd_get_link(nd);
43023 if (!IS_ERR(p))
43024 kfree(p);
43025 }
43026diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
43027index c29d1aa..58018da 100644
43028--- a/fs/cifs/misc.c
43029+++ b/fs/cifs/misc.c
43030@@ -156,7 +156,7 @@ cifs_buf_get(void)
43031 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
43032 atomic_inc(&bufAllocCount);
43033 #ifdef CONFIG_CIFS_STATS2
43034- atomic_inc(&totBufAllocCount);
43035+ atomic_inc_unchecked(&totBufAllocCount);
43036 #endif /* CONFIG_CIFS_STATS2 */
43037 }
43038
43039@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
43040 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
43041 atomic_inc(&smBufAllocCount);
43042 #ifdef CONFIG_CIFS_STATS2
43043- atomic_inc(&totSmBufAllocCount);
43044+ atomic_inc_unchecked(&totSmBufAllocCount);
43045 #endif /* CONFIG_CIFS_STATS2 */
43046
43047 }
43048diff --git a/fs/coda/cache.c b/fs/coda/cache.c
43049index 6901578..d402eb5 100644
43050--- a/fs/coda/cache.c
43051+++ b/fs/coda/cache.c
43052@@ -24,7 +24,7 @@
43053 #include "coda_linux.h"
43054 #include "coda_cache.h"
43055
43056-static atomic_t permission_epoch = ATOMIC_INIT(0);
43057+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
43058
43059 /* replace or extend an acl cache hit */
43060 void coda_cache_enter(struct inode *inode, int mask)
43061@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
43062 struct coda_inode_info *cii = ITOC(inode);
43063
43064 spin_lock(&cii->c_lock);
43065- cii->c_cached_epoch = atomic_read(&permission_epoch);
43066+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
43067 if (cii->c_uid != current_fsuid()) {
43068 cii->c_uid = current_fsuid();
43069 cii->c_cached_perm = mask;
43070@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
43071 {
43072 struct coda_inode_info *cii = ITOC(inode);
43073 spin_lock(&cii->c_lock);
43074- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
43075+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
43076 spin_unlock(&cii->c_lock);
43077 }
43078
43079 /* remove all acl caches */
43080 void coda_cache_clear_all(struct super_block *sb)
43081 {
43082- atomic_inc(&permission_epoch);
43083+ atomic_inc_unchecked(&permission_epoch);
43084 }
43085
43086
43087@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
43088 spin_lock(&cii->c_lock);
43089 hit = (mask & cii->c_cached_perm) == mask &&
43090 cii->c_uid == current_fsuid() &&
43091- cii->c_cached_epoch == atomic_read(&permission_epoch);
43092+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
43093 spin_unlock(&cii->c_lock);
43094
43095 return hit;
43096diff --git a/fs/compat.c b/fs/compat.c
43097index f2944ac..62845d2 100644
43098--- a/fs/compat.c
43099+++ b/fs/compat.c
43100@@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
43101
43102 set_fs(KERNEL_DS);
43103 /* The __user pointer cast is valid because of the set_fs() */
43104- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
43105+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
43106 set_fs(oldfs);
43107 /* truncating is ok because it's a user address */
43108 if (!ret)
43109@@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
43110 goto out;
43111
43112 ret = -EINVAL;
43113- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
43114+ if (nr_segs > UIO_MAXIOV)
43115 goto out;
43116 if (nr_segs > fast_segs) {
43117 ret = -ENOMEM;
43118@@ -831,6 +831,7 @@ struct compat_old_linux_dirent {
43119
43120 struct compat_readdir_callback {
43121 struct compat_old_linux_dirent __user *dirent;
43122+ struct file * file;
43123 int result;
43124 };
43125
43126@@ -848,6 +849,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
43127 buf->result = -EOVERFLOW;
43128 return -EOVERFLOW;
43129 }
43130+
43131+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43132+ return 0;
43133+
43134 buf->result++;
43135 dirent = buf->dirent;
43136 if (!access_ok(VERIFY_WRITE, dirent,
43137@@ -880,6 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
43138
43139 buf.result = 0;
43140 buf.dirent = dirent;
43141+ buf.file = file;
43142
43143 error = vfs_readdir(file, compat_fillonedir, &buf);
43144 if (buf.result)
43145@@ -900,6 +906,7 @@ struct compat_linux_dirent {
43146 struct compat_getdents_callback {
43147 struct compat_linux_dirent __user *current_dir;
43148 struct compat_linux_dirent __user *previous;
43149+ struct file * file;
43150 int count;
43151 int error;
43152 };
43153@@ -921,6 +928,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
43154 buf->error = -EOVERFLOW;
43155 return -EOVERFLOW;
43156 }
43157+
43158+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43159+ return 0;
43160+
43161 dirent = buf->previous;
43162 if (dirent) {
43163 if (__put_user(offset, &dirent->d_off))
43164@@ -968,6 +979,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
43165 buf.previous = NULL;
43166 buf.count = count;
43167 buf.error = 0;
43168+ buf.file = file;
43169
43170 error = vfs_readdir(file, compat_filldir, &buf);
43171 if (error >= 0)
43172@@ -989,6 +1001,7 @@ out:
43173 struct compat_getdents_callback64 {
43174 struct linux_dirent64 __user *current_dir;
43175 struct linux_dirent64 __user *previous;
43176+ struct file * file;
43177 int count;
43178 int error;
43179 };
43180@@ -1005,6 +1018,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
43181 buf->error = -EINVAL; /* only used if we fail.. */
43182 if (reclen > buf->count)
43183 return -EINVAL;
43184+
43185+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43186+ return 0;
43187+
43188 dirent = buf->previous;
43189
43190 if (dirent) {
43191@@ -1056,13 +1073,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
43192 buf.previous = NULL;
43193 buf.count = count;
43194 buf.error = 0;
43195+ buf.file = file;
43196
43197 error = vfs_readdir(file, compat_filldir64, &buf);
43198 if (error >= 0)
43199 error = buf.error;
43200 lastdirent = buf.previous;
43201 if (lastdirent) {
43202- typeof(lastdirent->d_off) d_off = file->f_pos;
43203+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
43204 if (__put_user_unaligned(d_off, &lastdirent->d_off))
43205 error = -EFAULT;
43206 else
43207diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
43208index 112e45a..b59845b 100644
43209--- a/fs/compat_binfmt_elf.c
43210+++ b/fs/compat_binfmt_elf.c
43211@@ -30,11 +30,13 @@
43212 #undef elf_phdr
43213 #undef elf_shdr
43214 #undef elf_note
43215+#undef elf_dyn
43216 #undef elf_addr_t
43217 #define elfhdr elf32_hdr
43218 #define elf_phdr elf32_phdr
43219 #define elf_shdr elf32_shdr
43220 #define elf_note elf32_note
43221+#define elf_dyn Elf32_Dyn
43222 #define elf_addr_t Elf32_Addr
43223
43224 /*
43225diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
43226index debdfe0..75d31d4 100644
43227--- a/fs/compat_ioctl.c
43228+++ b/fs/compat_ioctl.c
43229@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
43230
43231 err = get_user(palp, &up->palette);
43232 err |= get_user(length, &up->length);
43233+ if (err)
43234+ return -EFAULT;
43235
43236 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
43237 err = put_user(compat_ptr(palp), &up_native->palette);
43238@@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
43239 return -EFAULT;
43240 if (__get_user(udata, &ss32->iomem_base))
43241 return -EFAULT;
43242- ss.iomem_base = compat_ptr(udata);
43243+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
43244 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
43245 __get_user(ss.port_high, &ss32->port_high))
43246 return -EFAULT;
43247@@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
43248 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
43249 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
43250 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
43251- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43252+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43253 return -EFAULT;
43254
43255 return ioctl_preallocate(file, p);
43256@@ -1610,8 +1612,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
43257 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
43258 {
43259 unsigned int a, b;
43260- a = *(unsigned int *)p;
43261- b = *(unsigned int *)q;
43262+ a = *(const unsigned int *)p;
43263+ b = *(const unsigned int *)q;
43264 if (a > b)
43265 return 1;
43266 if (a < b)
43267diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
43268index 7e6c52d..94bc756 100644
43269--- a/fs/configfs/dir.c
43270+++ b/fs/configfs/dir.c
43271@@ -1564,7 +1564,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43272 }
43273 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
43274 struct configfs_dirent *next;
43275- const char * name;
43276+ const unsigned char * name;
43277+ char d_name[sizeof(next->s_dentry->d_iname)];
43278 int len;
43279 struct inode *inode = NULL;
43280
43281@@ -1574,7 +1575,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43282 continue;
43283
43284 name = configfs_get_name(next);
43285- len = strlen(name);
43286+ if (next->s_dentry && name == next->s_dentry->d_iname) {
43287+ len = next->s_dentry->d_name.len;
43288+ memcpy(d_name, name, len);
43289+ name = d_name;
43290+ } else
43291+ len = strlen(name);
43292
43293 /*
43294 * We'll have a dentry and an inode for
43295diff --git a/fs/dcache.c b/fs/dcache.c
43296index b80531c..8ca7e2d 100644
43297--- a/fs/dcache.c
43298+++ b/fs/dcache.c
43299@@ -3084,7 +3084,7 @@ void __init vfs_caches_init(unsigned long mempages)
43300 mempages -= reserve;
43301
43302 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
43303- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
43304+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
43305
43306 dcache_init();
43307 inode_init();
43308diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
43309index b80bc84..0d46d1a 100644
43310--- a/fs/debugfs/inode.c
43311+++ b/fs/debugfs/inode.c
43312@@ -408,7 +408,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
43313 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
43314 {
43315 return debugfs_create_file(name,
43316+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43317+ S_IFDIR | S_IRWXU,
43318+#else
43319 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43320+#endif
43321 parent, NULL, NULL);
43322 }
43323 EXPORT_SYMBOL_GPL(debugfs_create_dir);
43324diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
43325index ab35b11..b30af66 100644
43326--- a/fs/ecryptfs/inode.c
43327+++ b/fs/ecryptfs/inode.c
43328@@ -672,7 +672,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
43329 old_fs = get_fs();
43330 set_fs(get_ds());
43331 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
43332- (char __user *)lower_buf,
43333+ (char __force_user *)lower_buf,
43334 lower_bufsiz);
43335 set_fs(old_fs);
43336 if (rc < 0)
43337@@ -718,7 +718,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
43338 }
43339 old_fs = get_fs();
43340 set_fs(get_ds());
43341- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
43342+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
43343 set_fs(old_fs);
43344 if (rc < 0) {
43345 kfree(buf);
43346@@ -733,7 +733,7 @@ out:
43347 static void
43348 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
43349 {
43350- char *buf = nd_get_link(nd);
43351+ const char *buf = nd_get_link(nd);
43352 if (!IS_ERR(buf)) {
43353 /* Free the char* */
43354 kfree(buf);
43355diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
43356index 3a06f40..f7af544 100644
43357--- a/fs/ecryptfs/miscdev.c
43358+++ b/fs/ecryptfs/miscdev.c
43359@@ -345,7 +345,7 @@ check_list:
43360 goto out_unlock_msg_ctx;
43361 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
43362 if (msg_ctx->msg) {
43363- if (copy_to_user(&buf[i], packet_length, packet_length_size))
43364+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
43365 goto out_unlock_msg_ctx;
43366 i += packet_length_size;
43367 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
43368diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
43369index b2a34a1..162fa69 100644
43370--- a/fs/ecryptfs/read_write.c
43371+++ b/fs/ecryptfs/read_write.c
43372@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
43373 return -EIO;
43374 fs_save = get_fs();
43375 set_fs(get_ds());
43376- rc = vfs_write(lower_file, data, size, &offset);
43377+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
43378 set_fs(fs_save);
43379 mark_inode_dirty_sync(ecryptfs_inode);
43380 return rc;
43381@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
43382 return -EIO;
43383 fs_save = get_fs();
43384 set_fs(get_ds());
43385- rc = vfs_read(lower_file, data, size, &offset);
43386+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
43387 set_fs(fs_save);
43388 return rc;
43389 }
43390diff --git a/fs/exec.c b/fs/exec.c
43391index b1fd202..582240d 100644
43392--- a/fs/exec.c
43393+++ b/fs/exec.c
43394@@ -55,6 +55,15 @@
43395 #include <linux/pipe_fs_i.h>
43396 #include <linux/oom.h>
43397 #include <linux/compat.h>
43398+#include <linux/random.h>
43399+#include <linux/seq_file.h>
43400+
43401+#ifdef CONFIG_PAX_REFCOUNT
43402+#include <linux/kallsyms.h>
43403+#include <linux/kdebug.h>
43404+#endif
43405+
43406+#include <trace/events/fs.h>
43407
43408 #include <asm/uaccess.h>
43409 #include <asm/mmu_context.h>
43410@@ -66,6 +75,18 @@
43411
43412 #include <trace/events/sched.h>
43413
43414+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
43415+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
43416+{
43417+ WARN_ONCE(1, "PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
43418+}
43419+#endif
43420+
43421+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
43422+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
43423+EXPORT_SYMBOL(pax_set_initial_flags_func);
43424+#endif
43425+
43426 int core_uses_pid;
43427 char core_pattern[CORENAME_MAX_SIZE] = "core";
43428 unsigned int core_pipe_limit;
43429@@ -75,7 +96,7 @@ struct core_name {
43430 char *corename;
43431 int used, size;
43432 };
43433-static atomic_t call_count = ATOMIC_INIT(1);
43434+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
43435
43436 /* The maximal length of core_pattern is also specified in sysctl.c */
43437
43438@@ -191,18 +212,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43439 int write)
43440 {
43441 struct page *page;
43442- int ret;
43443
43444-#ifdef CONFIG_STACK_GROWSUP
43445- if (write) {
43446- ret = expand_downwards(bprm->vma, pos);
43447- if (ret < 0)
43448- return NULL;
43449- }
43450-#endif
43451- ret = get_user_pages(current, bprm->mm, pos,
43452- 1, write, 1, &page, NULL);
43453- if (ret <= 0)
43454+ if (0 > expand_downwards(bprm->vma, pos))
43455+ return NULL;
43456+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
43457 return NULL;
43458
43459 if (write) {
43460@@ -218,6 +231,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43461 if (size <= ARG_MAX)
43462 return page;
43463
43464+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43465+ // only allow 512KB for argv+env on suid/sgid binaries
43466+ // to prevent easy ASLR exhaustion
43467+ if (((bprm->cred->euid != current_euid()) ||
43468+ (bprm->cred->egid != current_egid())) &&
43469+ (size > (512 * 1024))) {
43470+ put_page(page);
43471+ return NULL;
43472+ }
43473+#endif
43474+
43475 /*
43476 * Limit to 1/4-th the stack size for the argv+env strings.
43477 * This ensures that:
43478@@ -277,6 +301,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43479 vma->vm_end = STACK_TOP_MAX;
43480 vma->vm_start = vma->vm_end - PAGE_SIZE;
43481 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
43482+
43483+#ifdef CONFIG_PAX_SEGMEXEC
43484+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
43485+#endif
43486+
43487 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
43488 INIT_LIST_HEAD(&vma->anon_vma_chain);
43489
43490@@ -291,6 +320,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43491 mm->stack_vm = mm->total_vm = 1;
43492 up_write(&mm->mmap_sem);
43493 bprm->p = vma->vm_end - sizeof(void *);
43494+
43495+#ifdef CONFIG_PAX_RANDUSTACK
43496+ if (randomize_va_space)
43497+ bprm->p ^= random32() & ~PAGE_MASK;
43498+#endif
43499+
43500 return 0;
43501 err:
43502 up_write(&mm->mmap_sem);
43503@@ -399,19 +434,7 @@ err:
43504 return err;
43505 }
43506
43507-struct user_arg_ptr {
43508-#ifdef CONFIG_COMPAT
43509- bool is_compat;
43510-#endif
43511- union {
43512- const char __user *const __user *native;
43513-#ifdef CONFIG_COMPAT
43514- compat_uptr_t __user *compat;
43515-#endif
43516- } ptr;
43517-};
43518-
43519-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43520+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43521 {
43522 const char __user *native;
43523
43524@@ -420,14 +443,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43525 compat_uptr_t compat;
43526
43527 if (get_user(compat, argv.ptr.compat + nr))
43528- return ERR_PTR(-EFAULT);
43529+ return (const char __force_user *)ERR_PTR(-EFAULT);
43530
43531 return compat_ptr(compat);
43532 }
43533 #endif
43534
43535 if (get_user(native, argv.ptr.native + nr))
43536- return ERR_PTR(-EFAULT);
43537+ return (const char __force_user *)ERR_PTR(-EFAULT);
43538
43539 return native;
43540 }
43541@@ -446,7 +469,7 @@ static int count(struct user_arg_ptr argv, int max)
43542 if (!p)
43543 break;
43544
43545- if (IS_ERR(p))
43546+ if (IS_ERR((const char __force_kernel *)p))
43547 return -EFAULT;
43548
43549 if (i++ >= max)
43550@@ -480,7 +503,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
43551
43552 ret = -EFAULT;
43553 str = get_user_arg_ptr(argv, argc);
43554- if (IS_ERR(str))
43555+ if (IS_ERR((const char __force_kernel *)str))
43556 goto out;
43557
43558 len = strnlen_user(str, MAX_ARG_STRLEN);
43559@@ -562,7 +585,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
43560 int r;
43561 mm_segment_t oldfs = get_fs();
43562 struct user_arg_ptr argv = {
43563- .ptr.native = (const char __user *const __user *)__argv,
43564+ .ptr.native = (const char __force_user *const __force_user *)__argv,
43565 };
43566
43567 set_fs(KERNEL_DS);
43568@@ -597,7 +620,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43569 unsigned long new_end = old_end - shift;
43570 struct mmu_gather tlb;
43571
43572- BUG_ON(new_start > new_end);
43573+ if (new_start >= new_end || new_start < mmap_min_addr)
43574+ return -ENOMEM;
43575
43576 /*
43577 * ensure there are no vmas between where we want to go
43578@@ -606,6 +630,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43579 if (vma != find_vma(mm, new_start))
43580 return -EFAULT;
43581
43582+#ifdef CONFIG_PAX_SEGMEXEC
43583+ BUG_ON(pax_find_mirror_vma(vma));
43584+#endif
43585+
43586 /*
43587 * cover the whole range: [new_start, old_end)
43588 */
43589@@ -686,10 +714,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43590 stack_top = arch_align_stack(stack_top);
43591 stack_top = PAGE_ALIGN(stack_top);
43592
43593- if (unlikely(stack_top < mmap_min_addr) ||
43594- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
43595- return -ENOMEM;
43596-
43597 stack_shift = vma->vm_end - stack_top;
43598
43599 bprm->p -= stack_shift;
43600@@ -701,8 +725,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
43601 bprm->exec -= stack_shift;
43602
43603 down_write(&mm->mmap_sem);
43604+
43605+ /* Move stack pages down in memory. */
43606+ if (stack_shift) {
43607+ ret = shift_arg_pages(vma, stack_shift);
43608+ if (ret)
43609+ goto out_unlock;
43610+ }
43611+
43612 vm_flags = VM_STACK_FLAGS;
43613
43614+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43615+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43616+ vm_flags &= ~VM_EXEC;
43617+
43618+#ifdef CONFIG_PAX_MPROTECT
43619+ if (mm->pax_flags & MF_PAX_MPROTECT)
43620+ vm_flags &= ~VM_MAYEXEC;
43621+#endif
43622+
43623+ }
43624+#endif
43625+
43626 /*
43627 * Adjust stack execute permissions; explicitly enable for
43628 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
43629@@ -721,13 +765,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43630 goto out_unlock;
43631 BUG_ON(prev != vma);
43632
43633- /* Move stack pages down in memory. */
43634- if (stack_shift) {
43635- ret = shift_arg_pages(vma, stack_shift);
43636- if (ret)
43637- goto out_unlock;
43638- }
43639-
43640 /* mprotect_fixup is overkill to remove the temporary stack flags */
43641 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
43642
43643@@ -785,6 +822,8 @@ struct file *open_exec(const char *name)
43644
43645 fsnotify_open(file);
43646
43647+ trace_open_exec(name);
43648+
43649 err = deny_write_access(file);
43650 if (err)
43651 goto exit;
43652@@ -808,7 +847,7 @@ int kernel_read(struct file *file, loff_t offset,
43653 old_fs = get_fs();
43654 set_fs(get_ds());
43655 /* The cast to a user pointer is valid due to the set_fs() */
43656- result = vfs_read(file, (void __user *)addr, count, &pos);
43657+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
43658 set_fs(old_fs);
43659 return result;
43660 }
43661@@ -1254,7 +1293,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
43662 }
43663 rcu_read_unlock();
43664
43665- if (p->fs->users > n_fs) {
43666+ if (atomic_read(&p->fs->users) > n_fs) {
43667 bprm->unsafe |= LSM_UNSAFE_SHARE;
43668 } else {
43669 res = -EAGAIN;
43670@@ -1451,6 +1490,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
43671
43672 EXPORT_SYMBOL(search_binary_handler);
43673
43674+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43675+static DEFINE_PER_CPU(u64, exec_counter);
43676+static int __init init_exec_counters(void)
43677+{
43678+ unsigned int cpu;
43679+
43680+ for_each_possible_cpu(cpu) {
43681+ per_cpu(exec_counter, cpu) = (u64)cpu;
43682+ }
43683+
43684+ return 0;
43685+}
43686+early_initcall(init_exec_counters);
43687+static inline void increment_exec_counter(void)
43688+{
43689+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
43690+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
43691+}
43692+#else
43693+static inline void increment_exec_counter(void) {}
43694+#endif
43695+
43696 /*
43697 * sys_execve() executes a new program.
43698 */
43699@@ -1459,6 +1520,11 @@ static int do_execve_common(const char *filename,
43700 struct user_arg_ptr envp,
43701 struct pt_regs *regs)
43702 {
43703+#ifdef CONFIG_GRKERNSEC
43704+ struct file *old_exec_file;
43705+ struct acl_subject_label *old_acl;
43706+ struct rlimit old_rlim[RLIM_NLIMITS];
43707+#endif
43708 struct linux_binprm *bprm;
43709 struct file *file;
43710 struct files_struct *displaced;
43711@@ -1466,6 +1532,8 @@ static int do_execve_common(const char *filename,
43712 int retval;
43713 const struct cred *cred = current_cred();
43714
43715+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
43716+
43717 /*
43718 * We move the actual failure in case of RLIMIT_NPROC excess from
43719 * set*uid() to execve() because too many poorly written programs
43720@@ -1506,12 +1574,27 @@ static int do_execve_common(const char *filename,
43721 if (IS_ERR(file))
43722 goto out_unmark;
43723
43724+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
43725+ retval = -EPERM;
43726+ goto out_file;
43727+ }
43728+
43729 sched_exec();
43730
43731 bprm->file = file;
43732 bprm->filename = filename;
43733 bprm->interp = filename;
43734
43735+ if (gr_process_user_ban()) {
43736+ retval = -EPERM;
43737+ goto out_file;
43738+ }
43739+
43740+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
43741+ retval = -EACCES;
43742+ goto out_file;
43743+ }
43744+
43745 retval = bprm_mm_init(bprm);
43746 if (retval)
43747 goto out_file;
43748@@ -1528,24 +1611,65 @@ static int do_execve_common(const char *filename,
43749 if (retval < 0)
43750 goto out;
43751
43752+#ifdef CONFIG_GRKERNSEC
43753+ old_acl = current->acl;
43754+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
43755+ old_exec_file = current->exec_file;
43756+ get_file(file);
43757+ current->exec_file = file;
43758+#endif
43759+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43760+ /* limit suid stack to 8MB
43761+ we saved the old limits above and will restore them if this exec fails
43762+ */
43763+ if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
43764+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
43765+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
43766+#endif
43767+
43768+ if (!gr_tpe_allow(file)) {
43769+ retval = -EACCES;
43770+ goto out_fail;
43771+ }
43772+
43773+ if (gr_check_crash_exec(file)) {
43774+ retval = -EACCES;
43775+ goto out_fail;
43776+ }
43777+
43778+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
43779+ bprm->unsafe);
43780+ if (retval < 0)
43781+ goto out_fail;
43782+
43783 retval = copy_strings_kernel(1, &bprm->filename, bprm);
43784 if (retval < 0)
43785- goto out;
43786+ goto out_fail;
43787
43788 bprm->exec = bprm->p;
43789 retval = copy_strings(bprm->envc, envp, bprm);
43790 if (retval < 0)
43791- goto out;
43792+ goto out_fail;
43793
43794 retval = copy_strings(bprm->argc, argv, bprm);
43795 if (retval < 0)
43796- goto out;
43797+ goto out_fail;
43798+
43799+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
43800+
43801+ gr_handle_exec_args(bprm, argv);
43802
43803 retval = search_binary_handler(bprm,regs);
43804 if (retval < 0)
43805- goto out;
43806+ goto out_fail;
43807+#ifdef CONFIG_GRKERNSEC
43808+ if (old_exec_file)
43809+ fput(old_exec_file);
43810+#endif
43811
43812 /* execve succeeded */
43813+
43814+ increment_exec_counter();
43815 current->fs->in_exec = 0;
43816 current->in_execve = 0;
43817 acct_update_integrals(current);
43818@@ -1554,6 +1678,14 @@ static int do_execve_common(const char *filename,
43819 put_files_struct(displaced);
43820 return retval;
43821
43822+out_fail:
43823+#ifdef CONFIG_GRKERNSEC
43824+ current->acl = old_acl;
43825+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
43826+ fput(current->exec_file);
43827+ current->exec_file = old_exec_file;
43828+#endif
43829+
43830 out:
43831 if (bprm->mm) {
43832 acct_arg_size(bprm, 0);
43833@@ -1627,7 +1759,7 @@ static int expand_corename(struct core_name *cn)
43834 {
43835 char *old_corename = cn->corename;
43836
43837- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
43838+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
43839 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
43840
43841 if (!cn->corename) {
43842@@ -1724,7 +1856,7 @@ static int format_corename(struct core_name *cn, long signr)
43843 int pid_in_pattern = 0;
43844 int err = 0;
43845
43846- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
43847+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
43848 cn->corename = kmalloc(cn->size, GFP_KERNEL);
43849 cn->used = 0;
43850
43851@@ -1821,6 +1953,228 @@ out:
43852 return ispipe;
43853 }
43854
43855+int pax_check_flags(unsigned long *flags)
43856+{
43857+ int retval = 0;
43858+
43859+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
43860+ if (*flags & MF_PAX_SEGMEXEC)
43861+ {
43862+ *flags &= ~MF_PAX_SEGMEXEC;
43863+ retval = -EINVAL;
43864+ }
43865+#endif
43866+
43867+ if ((*flags & MF_PAX_PAGEEXEC)
43868+
43869+#ifdef CONFIG_PAX_PAGEEXEC
43870+ && (*flags & MF_PAX_SEGMEXEC)
43871+#endif
43872+
43873+ )
43874+ {
43875+ *flags &= ~MF_PAX_PAGEEXEC;
43876+ retval = -EINVAL;
43877+ }
43878+
43879+ if ((*flags & MF_PAX_MPROTECT)
43880+
43881+#ifdef CONFIG_PAX_MPROTECT
43882+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43883+#endif
43884+
43885+ )
43886+ {
43887+ *flags &= ~MF_PAX_MPROTECT;
43888+ retval = -EINVAL;
43889+ }
43890+
43891+ if ((*flags & MF_PAX_EMUTRAMP)
43892+
43893+#ifdef CONFIG_PAX_EMUTRAMP
43894+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43895+#endif
43896+
43897+ )
43898+ {
43899+ *flags &= ~MF_PAX_EMUTRAMP;
43900+ retval = -EINVAL;
43901+ }
43902+
43903+ return retval;
43904+}
43905+
43906+EXPORT_SYMBOL(pax_check_flags);
43907+
43908+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43909+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
43910+{
43911+ struct task_struct *tsk = current;
43912+ struct mm_struct *mm = current->mm;
43913+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
43914+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
43915+ char *path_exec = NULL;
43916+ char *path_fault = NULL;
43917+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
43918+
43919+ if (buffer_exec && buffer_fault) {
43920+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
43921+
43922+ down_read(&mm->mmap_sem);
43923+ vma = mm->mmap;
43924+ while (vma && (!vma_exec || !vma_fault)) {
43925+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
43926+ vma_exec = vma;
43927+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
43928+ vma_fault = vma;
43929+ vma = vma->vm_next;
43930+ }
43931+ if (vma_exec) {
43932+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
43933+ if (IS_ERR(path_exec))
43934+ path_exec = "<path too long>";
43935+ else {
43936+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
43937+ if (path_exec) {
43938+ *path_exec = 0;
43939+ path_exec = buffer_exec;
43940+ } else
43941+ path_exec = "<path too long>";
43942+ }
43943+ }
43944+ if (vma_fault) {
43945+ start = vma_fault->vm_start;
43946+ end = vma_fault->vm_end;
43947+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
43948+ if (vma_fault->vm_file) {
43949+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
43950+ if (IS_ERR(path_fault))
43951+ path_fault = "<path too long>";
43952+ else {
43953+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
43954+ if (path_fault) {
43955+ *path_fault = 0;
43956+ path_fault = buffer_fault;
43957+ } else
43958+ path_fault = "<path too long>";
43959+ }
43960+ } else
43961+ path_fault = "<anonymous mapping>";
43962+ }
43963+ up_read(&mm->mmap_sem);
43964+ }
43965+ if (tsk->signal->curr_ip)
43966+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
43967+ else
43968+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
43969+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
43970+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
43971+ task_uid(tsk), task_euid(tsk), pc, sp);
43972+ free_page((unsigned long)buffer_exec);
43973+ free_page((unsigned long)buffer_fault);
43974+ pax_report_insns(regs, pc, sp);
43975+ do_coredump(SIGKILL, SIGKILL, regs);
43976+}
43977+#endif
43978+
43979+#ifdef CONFIG_PAX_REFCOUNT
43980+void pax_report_refcount_overflow(struct pt_regs *regs)
43981+{
43982+ if (current->signal->curr_ip)
43983+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
43984+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
43985+ else
43986+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
43987+ current->comm, task_pid_nr(current), current_uid(), current_euid());
43988+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
43989+ show_regs(regs);
43990+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
43991+}
43992+#endif
43993+
43994+#ifdef CONFIG_PAX_USERCOPY
43995+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
43996+int object_is_on_stack(const void *obj, unsigned long len)
43997+{
43998+ const void * const stack = task_stack_page(current);
43999+ const void * const stackend = stack + THREAD_SIZE;
44000+
44001+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44002+ const void *frame = NULL;
44003+ const void *oldframe;
44004+#endif
44005+
44006+ if (obj + len < obj)
44007+ return -1;
44008+
44009+ if (obj + len <= stack || stackend <= obj)
44010+ return 0;
44011+
44012+ if (obj < stack || stackend < obj + len)
44013+ return -1;
44014+
44015+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44016+ oldframe = __builtin_frame_address(1);
44017+ if (oldframe)
44018+ frame = __builtin_frame_address(2);
44019+ /*
44020+ low ----------------------------------------------> high
44021+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
44022+ ^----------------^
44023+ allow copies only within here
44024+ */
44025+ while (stack <= frame && frame < stackend) {
44026+ /* if obj + len extends past the last frame, this
44027+ check won't pass and the next frame will be 0,
44028+ causing us to bail out and correctly report
44029+ the copy as invalid
44030+ */
44031+ if (obj + len <= frame)
44032+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
44033+ oldframe = frame;
44034+ frame = *(const void * const *)frame;
44035+ }
44036+ return -1;
44037+#else
44038+ return 1;
44039+#endif
44040+}
44041+
44042+__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
44043+{
44044+ if (current->signal->curr_ip)
44045+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44046+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44047+ else
44048+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44049+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44050+ dump_stack();
44051+ gr_handle_kernel_exploit();
44052+ do_group_exit(SIGKILL);
44053+}
44054+#endif
44055+
44056+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
44057+void pax_track_stack(void)
44058+{
44059+ unsigned long sp = (unsigned long)&sp;
44060+ if (sp < current_thread_info()->lowest_stack &&
44061+ sp > (unsigned long)task_stack_page(current))
44062+ current_thread_info()->lowest_stack = sp;
44063+}
44064+EXPORT_SYMBOL(pax_track_stack);
44065+#endif
44066+
44067+#ifdef CONFIG_PAX_SIZE_OVERFLOW
44068+void report_size_overflow(const char *file, unsigned int line, const char *func)
44069+{
44070+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line);
44071+ dump_stack();
44072+ do_group_exit(SIGKILL);
44073+}
44074+EXPORT_SYMBOL(report_size_overflow);
44075+#endif
44076+
44077 static int zap_process(struct task_struct *start, int exit_code)
44078 {
44079 struct task_struct *t;
44080@@ -2018,17 +2372,17 @@ static void wait_for_dump_helpers(struct file *file)
44081 pipe = file->f_path.dentry->d_inode->i_pipe;
44082
44083 pipe_lock(pipe);
44084- pipe->readers++;
44085- pipe->writers--;
44086+ atomic_inc(&pipe->readers);
44087+ atomic_dec(&pipe->writers);
44088
44089- while ((pipe->readers > 1) && (!signal_pending(current))) {
44090+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
44091 wake_up_interruptible_sync(&pipe->wait);
44092 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44093 pipe_wait(pipe);
44094 }
44095
44096- pipe->readers--;
44097- pipe->writers++;
44098+ atomic_dec(&pipe->readers);
44099+ atomic_inc(&pipe->writers);
44100 pipe_unlock(pipe);
44101
44102 }
44103@@ -2089,7 +2443,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44104 int retval = 0;
44105 int flag = 0;
44106 int ispipe;
44107- static atomic_t core_dump_count = ATOMIC_INIT(0);
44108+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
44109 struct coredump_params cprm = {
44110 .signr = signr,
44111 .regs = regs,
44112@@ -2104,6 +2458,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44113
44114 audit_core_dumps(signr);
44115
44116+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
44117+ gr_handle_brute_attach(current, cprm.mm_flags);
44118+
44119 binfmt = mm->binfmt;
44120 if (!binfmt || !binfmt->core_dump)
44121 goto fail;
44122@@ -2171,7 +2528,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44123 }
44124 cprm.limit = RLIM_INFINITY;
44125
44126- dump_count = atomic_inc_return(&core_dump_count);
44127+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
44128 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
44129 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
44130 task_tgid_vnr(current), current->comm);
44131@@ -2198,6 +2555,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44132 } else {
44133 struct inode *inode;
44134
44135+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
44136+
44137 if (cprm.limit < binfmt->min_coredump)
44138 goto fail_unlock;
44139
44140@@ -2241,7 +2600,7 @@ close_fail:
44141 filp_close(cprm.file, NULL);
44142 fail_dropcount:
44143 if (ispipe)
44144- atomic_dec(&core_dump_count);
44145+ atomic_dec_unchecked(&core_dump_count);
44146 fail_unlock:
44147 kfree(cn.corename);
44148 fail_corename:
44149@@ -2260,7 +2619,7 @@ fail:
44150 */
44151 int dump_write(struct file *file, const void *addr, int nr)
44152 {
44153- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
44154+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
44155 }
44156 EXPORT_SYMBOL(dump_write);
44157
44158diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
44159index a8cbe1b..fed04cb 100644
44160--- a/fs/ext2/balloc.c
44161+++ b/fs/ext2/balloc.c
44162@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
44163
44164 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44165 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44166- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44167+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44168 sbi->s_resuid != current_fsuid() &&
44169 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44170 return 0;
44171diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
44172index baac1b1..1499b62 100644
44173--- a/fs/ext3/balloc.c
44174+++ b/fs/ext3/balloc.c
44175@@ -1438,9 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
44176
44177 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44178 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44179- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44180+ if (free_blocks < root_blocks + 1 &&
44181 !use_reservation && sbi->s_resuid != current_fsuid() &&
44182- (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44183+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
44184+ !capable_nolog(CAP_SYS_RESOURCE)) {
44185 return 0;
44186 }
44187 return 1;
44188diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
44189index 8da837b..ed3835b 100644
44190--- a/fs/ext4/balloc.c
44191+++ b/fs/ext4/balloc.c
44192@@ -463,8 +463,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
44193 /* Hm, nope. Are (enough) root reserved clusters available? */
44194 if (sbi->s_resuid == current_fsuid() ||
44195 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
44196- capable(CAP_SYS_RESOURCE) ||
44197- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
44198+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
44199+ capable_nolog(CAP_SYS_RESOURCE)) {
44200
44201 if (free_clusters >= (nclusters + dirty_clusters))
44202 return 1;
44203diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
44204index 0e01e90..ae2bd5e 100644
44205--- a/fs/ext4/ext4.h
44206+++ b/fs/ext4/ext4.h
44207@@ -1225,19 +1225,19 @@ struct ext4_sb_info {
44208 unsigned long s_mb_last_start;
44209
44210 /* stats for buddy allocator */
44211- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
44212- atomic_t s_bal_success; /* we found long enough chunks */
44213- atomic_t s_bal_allocated; /* in blocks */
44214- atomic_t s_bal_ex_scanned; /* total extents scanned */
44215- atomic_t s_bal_goals; /* goal hits */
44216- atomic_t s_bal_breaks; /* too long searches */
44217- atomic_t s_bal_2orders; /* 2^order hits */
44218+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
44219+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
44220+ atomic_unchecked_t s_bal_allocated; /* in blocks */
44221+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
44222+ atomic_unchecked_t s_bal_goals; /* goal hits */
44223+ atomic_unchecked_t s_bal_breaks; /* too long searches */
44224+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
44225 spinlock_t s_bal_lock;
44226 unsigned long s_mb_buddies_generated;
44227 unsigned long long s_mb_generation_time;
44228- atomic_t s_mb_lost_chunks;
44229- atomic_t s_mb_preallocated;
44230- atomic_t s_mb_discarded;
44231+ atomic_unchecked_t s_mb_lost_chunks;
44232+ atomic_unchecked_t s_mb_preallocated;
44233+ atomic_unchecked_t s_mb_discarded;
44234 atomic_t s_lock_busy;
44235
44236 /* locality groups */
44237diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
44238index 6b0a57e..1955a44 100644
44239--- a/fs/ext4/mballoc.c
44240+++ b/fs/ext4/mballoc.c
44241@@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
44242 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
44243
44244 if (EXT4_SB(sb)->s_mb_stats)
44245- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
44246+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
44247
44248 break;
44249 }
44250@@ -2041,7 +2041,7 @@ repeat:
44251 ac->ac_status = AC_STATUS_CONTINUE;
44252 ac->ac_flags |= EXT4_MB_HINT_FIRST;
44253 cr = 3;
44254- atomic_inc(&sbi->s_mb_lost_chunks);
44255+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
44256 goto repeat;
44257 }
44258 }
44259@@ -2545,25 +2545,25 @@ int ext4_mb_release(struct super_block *sb)
44260 if (sbi->s_mb_stats) {
44261 ext4_msg(sb, KERN_INFO,
44262 "mballoc: %u blocks %u reqs (%u success)",
44263- atomic_read(&sbi->s_bal_allocated),
44264- atomic_read(&sbi->s_bal_reqs),
44265- atomic_read(&sbi->s_bal_success));
44266+ atomic_read_unchecked(&sbi->s_bal_allocated),
44267+ atomic_read_unchecked(&sbi->s_bal_reqs),
44268+ atomic_read_unchecked(&sbi->s_bal_success));
44269 ext4_msg(sb, KERN_INFO,
44270 "mballoc: %u extents scanned, %u goal hits, "
44271 "%u 2^N hits, %u breaks, %u lost",
44272- atomic_read(&sbi->s_bal_ex_scanned),
44273- atomic_read(&sbi->s_bal_goals),
44274- atomic_read(&sbi->s_bal_2orders),
44275- atomic_read(&sbi->s_bal_breaks),
44276- atomic_read(&sbi->s_mb_lost_chunks));
44277+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
44278+ atomic_read_unchecked(&sbi->s_bal_goals),
44279+ atomic_read_unchecked(&sbi->s_bal_2orders),
44280+ atomic_read_unchecked(&sbi->s_bal_breaks),
44281+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
44282 ext4_msg(sb, KERN_INFO,
44283 "mballoc: %lu generated and it took %Lu",
44284 sbi->s_mb_buddies_generated,
44285 sbi->s_mb_generation_time);
44286 ext4_msg(sb, KERN_INFO,
44287 "mballoc: %u preallocated, %u discarded",
44288- atomic_read(&sbi->s_mb_preallocated),
44289- atomic_read(&sbi->s_mb_discarded));
44290+ atomic_read_unchecked(&sbi->s_mb_preallocated),
44291+ atomic_read_unchecked(&sbi->s_mb_discarded));
44292 }
44293
44294 free_percpu(sbi->s_locality_groups);
44295@@ -3045,16 +3045,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
44296 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
44297
44298 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
44299- atomic_inc(&sbi->s_bal_reqs);
44300- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44301+ atomic_inc_unchecked(&sbi->s_bal_reqs);
44302+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44303 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
44304- atomic_inc(&sbi->s_bal_success);
44305- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
44306+ atomic_inc_unchecked(&sbi->s_bal_success);
44307+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
44308 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
44309 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
44310- atomic_inc(&sbi->s_bal_goals);
44311+ atomic_inc_unchecked(&sbi->s_bal_goals);
44312 if (ac->ac_found > sbi->s_mb_max_to_scan)
44313- atomic_inc(&sbi->s_bal_breaks);
44314+ atomic_inc_unchecked(&sbi->s_bal_breaks);
44315 }
44316
44317 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
44318@@ -3458,7 +3458,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
44319 trace_ext4_mb_new_inode_pa(ac, pa);
44320
44321 ext4_mb_use_inode_pa(ac, pa);
44322- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
44323+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
44324
44325 ei = EXT4_I(ac->ac_inode);
44326 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44327@@ -3518,7 +3518,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
44328 trace_ext4_mb_new_group_pa(ac, pa);
44329
44330 ext4_mb_use_group_pa(ac, pa);
44331- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44332+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44333
44334 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44335 lg = ac->ac_lg;
44336@@ -3607,7 +3607,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
44337 * from the bitmap and continue.
44338 */
44339 }
44340- atomic_add(free, &sbi->s_mb_discarded);
44341+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
44342
44343 return err;
44344 }
44345@@ -3625,7 +3625,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
44346 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
44347 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
44348 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
44349- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44350+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44351 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
44352
44353 return 0;
44354diff --git a/fs/fcntl.c b/fs/fcntl.c
44355index 75e7c1f..1eb3e4d 100644
44356--- a/fs/fcntl.c
44357+++ b/fs/fcntl.c
44358@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
44359 if (err)
44360 return err;
44361
44362+ if (gr_handle_chroot_fowner(pid, type))
44363+ return -ENOENT;
44364+ if (gr_check_protected_task_fowner(pid, type))
44365+ return -EACCES;
44366+
44367 f_modown(filp, pid, type, force);
44368 return 0;
44369 }
44370@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
44371
44372 static int f_setown_ex(struct file *filp, unsigned long arg)
44373 {
44374- struct f_owner_ex * __user owner_p = (void * __user)arg;
44375+ struct f_owner_ex __user *owner_p = (void __user *)arg;
44376 struct f_owner_ex owner;
44377 struct pid *pid;
44378 int type;
44379@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
44380
44381 static int f_getown_ex(struct file *filp, unsigned long arg)
44382 {
44383- struct f_owner_ex * __user owner_p = (void * __user)arg;
44384+ struct f_owner_ex __user *owner_p = (void __user *)arg;
44385 struct f_owner_ex owner;
44386 int ret = 0;
44387
44388@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
44389 switch (cmd) {
44390 case F_DUPFD:
44391 case F_DUPFD_CLOEXEC:
44392+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
44393 if (arg >= rlimit(RLIMIT_NOFILE))
44394 break;
44395 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
44396diff --git a/fs/fifo.c b/fs/fifo.c
44397index b1a524d..4ee270e 100644
44398--- a/fs/fifo.c
44399+++ b/fs/fifo.c
44400@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
44401 */
44402 filp->f_op = &read_pipefifo_fops;
44403 pipe->r_counter++;
44404- if (pipe->readers++ == 0)
44405+ if (atomic_inc_return(&pipe->readers) == 1)
44406 wake_up_partner(inode);
44407
44408- if (!pipe->writers) {
44409+ if (!atomic_read(&pipe->writers)) {
44410 if ((filp->f_flags & O_NONBLOCK)) {
44411 /* suppress POLLHUP until we have
44412 * seen a writer */
44413@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
44414 * errno=ENXIO when there is no process reading the FIFO.
44415 */
44416 ret = -ENXIO;
44417- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
44418+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
44419 goto err;
44420
44421 filp->f_op = &write_pipefifo_fops;
44422 pipe->w_counter++;
44423- if (!pipe->writers++)
44424+ if (atomic_inc_return(&pipe->writers) == 1)
44425 wake_up_partner(inode);
44426
44427- if (!pipe->readers) {
44428+ if (!atomic_read(&pipe->readers)) {
44429 wait_for_partner(inode, &pipe->r_counter);
44430 if (signal_pending(current))
44431 goto err_wr;
44432@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
44433 */
44434 filp->f_op = &rdwr_pipefifo_fops;
44435
44436- pipe->readers++;
44437- pipe->writers++;
44438+ atomic_inc(&pipe->readers);
44439+ atomic_inc(&pipe->writers);
44440 pipe->r_counter++;
44441 pipe->w_counter++;
44442- if (pipe->readers == 1 || pipe->writers == 1)
44443+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
44444 wake_up_partner(inode);
44445 break;
44446
44447@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
44448 return 0;
44449
44450 err_rd:
44451- if (!--pipe->readers)
44452+ if (atomic_dec_and_test(&pipe->readers))
44453 wake_up_interruptible(&pipe->wait);
44454 ret = -ERESTARTSYS;
44455 goto err;
44456
44457 err_wr:
44458- if (!--pipe->writers)
44459+ if (atomic_dec_and_test(&pipe->writers))
44460 wake_up_interruptible(&pipe->wait);
44461 ret = -ERESTARTSYS;
44462 goto err;
44463
44464 err:
44465- if (!pipe->readers && !pipe->writers)
44466+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
44467 free_pipe_info(inode);
44468
44469 err_nocleanup:
44470diff --git a/fs/file.c b/fs/file.c
44471index ba3f605..fade102 100644
44472--- a/fs/file.c
44473+++ b/fs/file.c
44474@@ -15,6 +15,7 @@
44475 #include <linux/slab.h>
44476 #include <linux/vmalloc.h>
44477 #include <linux/file.h>
44478+#include <linux/security.h>
44479 #include <linux/fdtable.h>
44480 #include <linux/bitops.h>
44481 #include <linux/interrupt.h>
44482@@ -255,6 +256,7 @@ int expand_files(struct files_struct *files, int nr)
44483 * N.B. For clone tasks sharing a files structure, this test
44484 * will limit the total number of files that can be opened.
44485 */
44486+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
44487 if (nr >= rlimit(RLIMIT_NOFILE))
44488 return -EMFILE;
44489
44490diff --git a/fs/filesystems.c b/fs/filesystems.c
44491index 96f2428..f5eeb8e 100644
44492--- a/fs/filesystems.c
44493+++ b/fs/filesystems.c
44494@@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
44495 int len = dot ? dot - name : strlen(name);
44496
44497 fs = __get_fs_type(name, len);
44498+
44499+#ifdef CONFIG_GRKERNSEC_MODHARDEN
44500+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
44501+#else
44502 if (!fs && (request_module("%.*s", len, name) == 0))
44503+#endif
44504 fs = __get_fs_type(name, len);
44505
44506 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
44507diff --git a/fs/fs_struct.c b/fs/fs_struct.c
44508index e159e68..e7d2a6f 100644
44509--- a/fs/fs_struct.c
44510+++ b/fs/fs_struct.c
44511@@ -4,6 +4,7 @@
44512 #include <linux/path.h>
44513 #include <linux/slab.h>
44514 #include <linux/fs_struct.h>
44515+#include <linux/grsecurity.h>
44516 #include "internal.h"
44517
44518 static inline void path_get_longterm(struct path *path)
44519@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
44520 write_seqcount_begin(&fs->seq);
44521 old_root = fs->root;
44522 fs->root = *path;
44523+ gr_set_chroot_entries(current, path);
44524 write_seqcount_end(&fs->seq);
44525 spin_unlock(&fs->lock);
44526 if (old_root.dentry)
44527@@ -65,6 +67,17 @@ static inline int replace_path(struct path *p, const struct path *old, const str
44528 return 1;
44529 }
44530
44531+static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
44532+{
44533+ if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
44534+ return 0;
44535+ *p = *new;
44536+
44537+ gr_set_chroot_entries(task, new);
44538+
44539+ return 1;
44540+}
44541+
44542 void chroot_fs_refs(struct path *old_root, struct path *new_root)
44543 {
44544 struct task_struct *g, *p;
44545@@ -79,7 +92,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
44546 int hits = 0;
44547 spin_lock(&fs->lock);
44548 write_seqcount_begin(&fs->seq);
44549- hits += replace_path(&fs->root, old_root, new_root);
44550+ hits += replace_root_path(p, &fs->root, old_root, new_root);
44551 hits += replace_path(&fs->pwd, old_root, new_root);
44552 write_seqcount_end(&fs->seq);
44553 while (hits--) {
44554@@ -111,7 +124,8 @@ void exit_fs(struct task_struct *tsk)
44555 task_lock(tsk);
44556 spin_lock(&fs->lock);
44557 tsk->fs = NULL;
44558- kill = !--fs->users;
44559+ gr_clear_chroot_entries(tsk);
44560+ kill = !atomic_dec_return(&fs->users);
44561 spin_unlock(&fs->lock);
44562 task_unlock(tsk);
44563 if (kill)
44564@@ -124,7 +138,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44565 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
44566 /* We don't need to lock fs - think why ;-) */
44567 if (fs) {
44568- fs->users = 1;
44569+ atomic_set(&fs->users, 1);
44570 fs->in_exec = 0;
44571 spin_lock_init(&fs->lock);
44572 seqcount_init(&fs->seq);
44573@@ -133,6 +147,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44574 spin_lock(&old->lock);
44575 fs->root = old->root;
44576 path_get_longterm(&fs->root);
44577+ /* instead of calling gr_set_chroot_entries here,
44578+ we call it from every caller of this function
44579+ */
44580 fs->pwd = old->pwd;
44581 path_get_longterm(&fs->pwd);
44582 spin_unlock(&old->lock);
44583@@ -151,8 +168,9 @@ int unshare_fs_struct(void)
44584
44585 task_lock(current);
44586 spin_lock(&fs->lock);
44587- kill = !--fs->users;
44588+ kill = !atomic_dec_return(&fs->users);
44589 current->fs = new_fs;
44590+ gr_set_chroot_entries(current, &new_fs->root);
44591 spin_unlock(&fs->lock);
44592 task_unlock(current);
44593
44594@@ -165,13 +183,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
44595
44596 int current_umask(void)
44597 {
44598- return current->fs->umask;
44599+ return current->fs->umask | gr_acl_umask();
44600 }
44601 EXPORT_SYMBOL(current_umask);
44602
44603 /* to be mentioned only in INIT_TASK */
44604 struct fs_struct init_fs = {
44605- .users = 1,
44606+ .users = ATOMIC_INIT(1),
44607 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
44608 .seq = SEQCNT_ZERO,
44609 .umask = 0022,
44610@@ -187,12 +205,13 @@ void daemonize_fs_struct(void)
44611 task_lock(current);
44612
44613 spin_lock(&init_fs.lock);
44614- init_fs.users++;
44615+ atomic_inc(&init_fs.users);
44616 spin_unlock(&init_fs.lock);
44617
44618 spin_lock(&fs->lock);
44619 current->fs = &init_fs;
44620- kill = !--fs->users;
44621+ gr_set_chroot_entries(current, &current->fs->root);
44622+ kill = !atomic_dec_return(&fs->users);
44623 spin_unlock(&fs->lock);
44624
44625 task_unlock(current);
44626diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
44627index 9905350..02eaec4 100644
44628--- a/fs/fscache/cookie.c
44629+++ b/fs/fscache/cookie.c
44630@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
44631 parent ? (char *) parent->def->name : "<no-parent>",
44632 def->name, netfs_data);
44633
44634- fscache_stat(&fscache_n_acquires);
44635+ fscache_stat_unchecked(&fscache_n_acquires);
44636
44637 /* if there's no parent cookie, then we don't create one here either */
44638 if (!parent) {
44639- fscache_stat(&fscache_n_acquires_null);
44640+ fscache_stat_unchecked(&fscache_n_acquires_null);
44641 _leave(" [no parent]");
44642 return NULL;
44643 }
44644@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
44645 /* allocate and initialise a cookie */
44646 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
44647 if (!cookie) {
44648- fscache_stat(&fscache_n_acquires_oom);
44649+ fscache_stat_unchecked(&fscache_n_acquires_oom);
44650 _leave(" [ENOMEM]");
44651 return NULL;
44652 }
44653@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44654
44655 switch (cookie->def->type) {
44656 case FSCACHE_COOKIE_TYPE_INDEX:
44657- fscache_stat(&fscache_n_cookie_index);
44658+ fscache_stat_unchecked(&fscache_n_cookie_index);
44659 break;
44660 case FSCACHE_COOKIE_TYPE_DATAFILE:
44661- fscache_stat(&fscache_n_cookie_data);
44662+ fscache_stat_unchecked(&fscache_n_cookie_data);
44663 break;
44664 default:
44665- fscache_stat(&fscache_n_cookie_special);
44666+ fscache_stat_unchecked(&fscache_n_cookie_special);
44667 break;
44668 }
44669
44670@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44671 if (fscache_acquire_non_index_cookie(cookie) < 0) {
44672 atomic_dec(&parent->n_children);
44673 __fscache_cookie_put(cookie);
44674- fscache_stat(&fscache_n_acquires_nobufs);
44675+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
44676 _leave(" = NULL");
44677 return NULL;
44678 }
44679 }
44680
44681- fscache_stat(&fscache_n_acquires_ok);
44682+ fscache_stat_unchecked(&fscache_n_acquires_ok);
44683 _leave(" = %p", cookie);
44684 return cookie;
44685 }
44686@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
44687 cache = fscache_select_cache_for_object(cookie->parent);
44688 if (!cache) {
44689 up_read(&fscache_addremove_sem);
44690- fscache_stat(&fscache_n_acquires_no_cache);
44691+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
44692 _leave(" = -ENOMEDIUM [no cache]");
44693 return -ENOMEDIUM;
44694 }
44695@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
44696 object = cache->ops->alloc_object(cache, cookie);
44697 fscache_stat_d(&fscache_n_cop_alloc_object);
44698 if (IS_ERR(object)) {
44699- fscache_stat(&fscache_n_object_no_alloc);
44700+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
44701 ret = PTR_ERR(object);
44702 goto error;
44703 }
44704
44705- fscache_stat(&fscache_n_object_alloc);
44706+ fscache_stat_unchecked(&fscache_n_object_alloc);
44707
44708 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
44709
44710@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
44711 struct fscache_object *object;
44712 struct hlist_node *_p;
44713
44714- fscache_stat(&fscache_n_updates);
44715+ fscache_stat_unchecked(&fscache_n_updates);
44716
44717 if (!cookie) {
44718- fscache_stat(&fscache_n_updates_null);
44719+ fscache_stat_unchecked(&fscache_n_updates_null);
44720 _leave(" [no cookie]");
44721 return;
44722 }
44723@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44724 struct fscache_object *object;
44725 unsigned long event;
44726
44727- fscache_stat(&fscache_n_relinquishes);
44728+ fscache_stat_unchecked(&fscache_n_relinquishes);
44729 if (retire)
44730- fscache_stat(&fscache_n_relinquishes_retire);
44731+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
44732
44733 if (!cookie) {
44734- fscache_stat(&fscache_n_relinquishes_null);
44735+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
44736 _leave(" [no cookie]");
44737 return;
44738 }
44739@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44740
44741 /* wait for the cookie to finish being instantiated (or to fail) */
44742 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
44743- fscache_stat(&fscache_n_relinquishes_waitcrt);
44744+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
44745 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
44746 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
44747 }
44748diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
44749index f6aad48..88dcf26 100644
44750--- a/fs/fscache/internal.h
44751+++ b/fs/fscache/internal.h
44752@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
44753 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
44754 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
44755
44756-extern atomic_t fscache_n_op_pend;
44757-extern atomic_t fscache_n_op_run;
44758-extern atomic_t fscache_n_op_enqueue;
44759-extern atomic_t fscache_n_op_deferred_release;
44760-extern atomic_t fscache_n_op_release;
44761-extern atomic_t fscache_n_op_gc;
44762-extern atomic_t fscache_n_op_cancelled;
44763-extern atomic_t fscache_n_op_rejected;
44764+extern atomic_unchecked_t fscache_n_op_pend;
44765+extern atomic_unchecked_t fscache_n_op_run;
44766+extern atomic_unchecked_t fscache_n_op_enqueue;
44767+extern atomic_unchecked_t fscache_n_op_deferred_release;
44768+extern atomic_unchecked_t fscache_n_op_release;
44769+extern atomic_unchecked_t fscache_n_op_gc;
44770+extern atomic_unchecked_t fscache_n_op_cancelled;
44771+extern atomic_unchecked_t fscache_n_op_rejected;
44772
44773-extern atomic_t fscache_n_attr_changed;
44774-extern atomic_t fscache_n_attr_changed_ok;
44775-extern atomic_t fscache_n_attr_changed_nobufs;
44776-extern atomic_t fscache_n_attr_changed_nomem;
44777-extern atomic_t fscache_n_attr_changed_calls;
44778+extern atomic_unchecked_t fscache_n_attr_changed;
44779+extern atomic_unchecked_t fscache_n_attr_changed_ok;
44780+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
44781+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
44782+extern atomic_unchecked_t fscache_n_attr_changed_calls;
44783
44784-extern atomic_t fscache_n_allocs;
44785-extern atomic_t fscache_n_allocs_ok;
44786-extern atomic_t fscache_n_allocs_wait;
44787-extern atomic_t fscache_n_allocs_nobufs;
44788-extern atomic_t fscache_n_allocs_intr;
44789-extern atomic_t fscache_n_allocs_object_dead;
44790-extern atomic_t fscache_n_alloc_ops;
44791-extern atomic_t fscache_n_alloc_op_waits;
44792+extern atomic_unchecked_t fscache_n_allocs;
44793+extern atomic_unchecked_t fscache_n_allocs_ok;
44794+extern atomic_unchecked_t fscache_n_allocs_wait;
44795+extern atomic_unchecked_t fscache_n_allocs_nobufs;
44796+extern atomic_unchecked_t fscache_n_allocs_intr;
44797+extern atomic_unchecked_t fscache_n_allocs_object_dead;
44798+extern atomic_unchecked_t fscache_n_alloc_ops;
44799+extern atomic_unchecked_t fscache_n_alloc_op_waits;
44800
44801-extern atomic_t fscache_n_retrievals;
44802-extern atomic_t fscache_n_retrievals_ok;
44803-extern atomic_t fscache_n_retrievals_wait;
44804-extern atomic_t fscache_n_retrievals_nodata;
44805-extern atomic_t fscache_n_retrievals_nobufs;
44806-extern atomic_t fscache_n_retrievals_intr;
44807-extern atomic_t fscache_n_retrievals_nomem;
44808-extern atomic_t fscache_n_retrievals_object_dead;
44809-extern atomic_t fscache_n_retrieval_ops;
44810-extern atomic_t fscache_n_retrieval_op_waits;
44811+extern atomic_unchecked_t fscache_n_retrievals;
44812+extern atomic_unchecked_t fscache_n_retrievals_ok;
44813+extern atomic_unchecked_t fscache_n_retrievals_wait;
44814+extern atomic_unchecked_t fscache_n_retrievals_nodata;
44815+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
44816+extern atomic_unchecked_t fscache_n_retrievals_intr;
44817+extern atomic_unchecked_t fscache_n_retrievals_nomem;
44818+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
44819+extern atomic_unchecked_t fscache_n_retrieval_ops;
44820+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
44821
44822-extern atomic_t fscache_n_stores;
44823-extern atomic_t fscache_n_stores_ok;
44824-extern atomic_t fscache_n_stores_again;
44825-extern atomic_t fscache_n_stores_nobufs;
44826-extern atomic_t fscache_n_stores_oom;
44827-extern atomic_t fscache_n_store_ops;
44828-extern atomic_t fscache_n_store_calls;
44829-extern atomic_t fscache_n_store_pages;
44830-extern atomic_t fscache_n_store_radix_deletes;
44831-extern atomic_t fscache_n_store_pages_over_limit;
44832+extern atomic_unchecked_t fscache_n_stores;
44833+extern atomic_unchecked_t fscache_n_stores_ok;
44834+extern atomic_unchecked_t fscache_n_stores_again;
44835+extern atomic_unchecked_t fscache_n_stores_nobufs;
44836+extern atomic_unchecked_t fscache_n_stores_oom;
44837+extern atomic_unchecked_t fscache_n_store_ops;
44838+extern atomic_unchecked_t fscache_n_store_calls;
44839+extern atomic_unchecked_t fscache_n_store_pages;
44840+extern atomic_unchecked_t fscache_n_store_radix_deletes;
44841+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
44842
44843-extern atomic_t fscache_n_store_vmscan_not_storing;
44844-extern atomic_t fscache_n_store_vmscan_gone;
44845-extern atomic_t fscache_n_store_vmscan_busy;
44846-extern atomic_t fscache_n_store_vmscan_cancelled;
44847+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
44848+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
44849+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
44850+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
44851
44852-extern atomic_t fscache_n_marks;
44853-extern atomic_t fscache_n_uncaches;
44854+extern atomic_unchecked_t fscache_n_marks;
44855+extern atomic_unchecked_t fscache_n_uncaches;
44856
44857-extern atomic_t fscache_n_acquires;
44858-extern atomic_t fscache_n_acquires_null;
44859-extern atomic_t fscache_n_acquires_no_cache;
44860-extern atomic_t fscache_n_acquires_ok;
44861-extern atomic_t fscache_n_acquires_nobufs;
44862-extern atomic_t fscache_n_acquires_oom;
44863+extern atomic_unchecked_t fscache_n_acquires;
44864+extern atomic_unchecked_t fscache_n_acquires_null;
44865+extern atomic_unchecked_t fscache_n_acquires_no_cache;
44866+extern atomic_unchecked_t fscache_n_acquires_ok;
44867+extern atomic_unchecked_t fscache_n_acquires_nobufs;
44868+extern atomic_unchecked_t fscache_n_acquires_oom;
44869
44870-extern atomic_t fscache_n_updates;
44871-extern atomic_t fscache_n_updates_null;
44872-extern atomic_t fscache_n_updates_run;
44873+extern atomic_unchecked_t fscache_n_updates;
44874+extern atomic_unchecked_t fscache_n_updates_null;
44875+extern atomic_unchecked_t fscache_n_updates_run;
44876
44877-extern atomic_t fscache_n_relinquishes;
44878-extern atomic_t fscache_n_relinquishes_null;
44879-extern atomic_t fscache_n_relinquishes_waitcrt;
44880-extern atomic_t fscache_n_relinquishes_retire;
44881+extern atomic_unchecked_t fscache_n_relinquishes;
44882+extern atomic_unchecked_t fscache_n_relinquishes_null;
44883+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
44884+extern atomic_unchecked_t fscache_n_relinquishes_retire;
44885
44886-extern atomic_t fscache_n_cookie_index;
44887-extern atomic_t fscache_n_cookie_data;
44888-extern atomic_t fscache_n_cookie_special;
44889+extern atomic_unchecked_t fscache_n_cookie_index;
44890+extern atomic_unchecked_t fscache_n_cookie_data;
44891+extern atomic_unchecked_t fscache_n_cookie_special;
44892
44893-extern atomic_t fscache_n_object_alloc;
44894-extern atomic_t fscache_n_object_no_alloc;
44895-extern atomic_t fscache_n_object_lookups;
44896-extern atomic_t fscache_n_object_lookups_negative;
44897-extern atomic_t fscache_n_object_lookups_positive;
44898-extern atomic_t fscache_n_object_lookups_timed_out;
44899-extern atomic_t fscache_n_object_created;
44900-extern atomic_t fscache_n_object_avail;
44901-extern atomic_t fscache_n_object_dead;
44902+extern atomic_unchecked_t fscache_n_object_alloc;
44903+extern atomic_unchecked_t fscache_n_object_no_alloc;
44904+extern atomic_unchecked_t fscache_n_object_lookups;
44905+extern atomic_unchecked_t fscache_n_object_lookups_negative;
44906+extern atomic_unchecked_t fscache_n_object_lookups_positive;
44907+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
44908+extern atomic_unchecked_t fscache_n_object_created;
44909+extern atomic_unchecked_t fscache_n_object_avail;
44910+extern atomic_unchecked_t fscache_n_object_dead;
44911
44912-extern atomic_t fscache_n_checkaux_none;
44913-extern atomic_t fscache_n_checkaux_okay;
44914-extern atomic_t fscache_n_checkaux_update;
44915-extern atomic_t fscache_n_checkaux_obsolete;
44916+extern atomic_unchecked_t fscache_n_checkaux_none;
44917+extern atomic_unchecked_t fscache_n_checkaux_okay;
44918+extern atomic_unchecked_t fscache_n_checkaux_update;
44919+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
44920
44921 extern atomic_t fscache_n_cop_alloc_object;
44922 extern atomic_t fscache_n_cop_lookup_object;
44923@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
44924 atomic_inc(stat);
44925 }
44926
44927+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
44928+{
44929+ atomic_inc_unchecked(stat);
44930+}
44931+
44932 static inline void fscache_stat_d(atomic_t *stat)
44933 {
44934 atomic_dec(stat);
44935@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
44936
44937 #define __fscache_stat(stat) (NULL)
44938 #define fscache_stat(stat) do {} while (0)
44939+#define fscache_stat_unchecked(stat) do {} while (0)
44940 #define fscache_stat_d(stat) do {} while (0)
44941 #endif
44942
44943diff --git a/fs/fscache/object.c b/fs/fscache/object.c
44944index b6b897c..0ffff9c 100644
44945--- a/fs/fscache/object.c
44946+++ b/fs/fscache/object.c
44947@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44948 /* update the object metadata on disk */
44949 case FSCACHE_OBJECT_UPDATING:
44950 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
44951- fscache_stat(&fscache_n_updates_run);
44952+ fscache_stat_unchecked(&fscache_n_updates_run);
44953 fscache_stat(&fscache_n_cop_update_object);
44954 object->cache->ops->update_object(object);
44955 fscache_stat_d(&fscache_n_cop_update_object);
44956@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44957 spin_lock(&object->lock);
44958 object->state = FSCACHE_OBJECT_DEAD;
44959 spin_unlock(&object->lock);
44960- fscache_stat(&fscache_n_object_dead);
44961+ fscache_stat_unchecked(&fscache_n_object_dead);
44962 goto terminal_transit;
44963
44964 /* handle the parent cache of this object being withdrawn from
44965@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44966 spin_lock(&object->lock);
44967 object->state = FSCACHE_OBJECT_DEAD;
44968 spin_unlock(&object->lock);
44969- fscache_stat(&fscache_n_object_dead);
44970+ fscache_stat_unchecked(&fscache_n_object_dead);
44971 goto terminal_transit;
44972
44973 /* complain about the object being woken up once it is
44974@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
44975 parent->cookie->def->name, cookie->def->name,
44976 object->cache->tag->name);
44977
44978- fscache_stat(&fscache_n_object_lookups);
44979+ fscache_stat_unchecked(&fscache_n_object_lookups);
44980 fscache_stat(&fscache_n_cop_lookup_object);
44981 ret = object->cache->ops->lookup_object(object);
44982 fscache_stat_d(&fscache_n_cop_lookup_object);
44983@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
44984 if (ret == -ETIMEDOUT) {
44985 /* probably stuck behind another object, so move this one to
44986 * the back of the queue */
44987- fscache_stat(&fscache_n_object_lookups_timed_out);
44988+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
44989 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
44990 }
44991
44992@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
44993
44994 spin_lock(&object->lock);
44995 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
44996- fscache_stat(&fscache_n_object_lookups_negative);
44997+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
44998
44999 /* transit here to allow write requests to begin stacking up
45000 * and read requests to begin returning ENODATA */
45001@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
45002 * result, in which case there may be data available */
45003 spin_lock(&object->lock);
45004 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45005- fscache_stat(&fscache_n_object_lookups_positive);
45006+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
45007
45008 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
45009
45010@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
45011 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45012 } else {
45013 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
45014- fscache_stat(&fscache_n_object_created);
45015+ fscache_stat_unchecked(&fscache_n_object_created);
45016
45017 object->state = FSCACHE_OBJECT_AVAILABLE;
45018 spin_unlock(&object->lock);
45019@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
45020 fscache_enqueue_dependents(object);
45021
45022 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
45023- fscache_stat(&fscache_n_object_avail);
45024+ fscache_stat_unchecked(&fscache_n_object_avail);
45025
45026 _leave("");
45027 }
45028@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45029 enum fscache_checkaux result;
45030
45031 if (!object->cookie->def->check_aux) {
45032- fscache_stat(&fscache_n_checkaux_none);
45033+ fscache_stat_unchecked(&fscache_n_checkaux_none);
45034 return FSCACHE_CHECKAUX_OKAY;
45035 }
45036
45037@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45038 switch (result) {
45039 /* entry okay as is */
45040 case FSCACHE_CHECKAUX_OKAY:
45041- fscache_stat(&fscache_n_checkaux_okay);
45042+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
45043 break;
45044
45045 /* entry requires update */
45046 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
45047- fscache_stat(&fscache_n_checkaux_update);
45048+ fscache_stat_unchecked(&fscache_n_checkaux_update);
45049 break;
45050
45051 /* entry requires deletion */
45052 case FSCACHE_CHECKAUX_OBSOLETE:
45053- fscache_stat(&fscache_n_checkaux_obsolete);
45054+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
45055 break;
45056
45057 default:
45058diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
45059index 30afdfa..2256596 100644
45060--- a/fs/fscache/operation.c
45061+++ b/fs/fscache/operation.c
45062@@ -17,7 +17,7 @@
45063 #include <linux/slab.h>
45064 #include "internal.h"
45065
45066-atomic_t fscache_op_debug_id;
45067+atomic_unchecked_t fscache_op_debug_id;
45068 EXPORT_SYMBOL(fscache_op_debug_id);
45069
45070 /**
45071@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
45072 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
45073 ASSERTCMP(atomic_read(&op->usage), >, 0);
45074
45075- fscache_stat(&fscache_n_op_enqueue);
45076+ fscache_stat_unchecked(&fscache_n_op_enqueue);
45077 switch (op->flags & FSCACHE_OP_TYPE) {
45078 case FSCACHE_OP_ASYNC:
45079 _debug("queue async");
45080@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
45081 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
45082 if (op->processor)
45083 fscache_enqueue_operation(op);
45084- fscache_stat(&fscache_n_op_run);
45085+ fscache_stat_unchecked(&fscache_n_op_run);
45086 }
45087
45088 /*
45089@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45090 if (object->n_ops > 1) {
45091 atomic_inc(&op->usage);
45092 list_add_tail(&op->pend_link, &object->pending_ops);
45093- fscache_stat(&fscache_n_op_pend);
45094+ fscache_stat_unchecked(&fscache_n_op_pend);
45095 } else if (!list_empty(&object->pending_ops)) {
45096 atomic_inc(&op->usage);
45097 list_add_tail(&op->pend_link, &object->pending_ops);
45098- fscache_stat(&fscache_n_op_pend);
45099+ fscache_stat_unchecked(&fscache_n_op_pend);
45100 fscache_start_operations(object);
45101 } else {
45102 ASSERTCMP(object->n_in_progress, ==, 0);
45103@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45104 object->n_exclusive++; /* reads and writes must wait */
45105 atomic_inc(&op->usage);
45106 list_add_tail(&op->pend_link, &object->pending_ops);
45107- fscache_stat(&fscache_n_op_pend);
45108+ fscache_stat_unchecked(&fscache_n_op_pend);
45109 ret = 0;
45110 } else {
45111 /* not allowed to submit ops in any other state */
45112@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
45113 if (object->n_exclusive > 0) {
45114 atomic_inc(&op->usage);
45115 list_add_tail(&op->pend_link, &object->pending_ops);
45116- fscache_stat(&fscache_n_op_pend);
45117+ fscache_stat_unchecked(&fscache_n_op_pend);
45118 } else if (!list_empty(&object->pending_ops)) {
45119 atomic_inc(&op->usage);
45120 list_add_tail(&op->pend_link, &object->pending_ops);
45121- fscache_stat(&fscache_n_op_pend);
45122+ fscache_stat_unchecked(&fscache_n_op_pend);
45123 fscache_start_operations(object);
45124 } else {
45125 ASSERTCMP(object->n_exclusive, ==, 0);
45126@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
45127 object->n_ops++;
45128 atomic_inc(&op->usage);
45129 list_add_tail(&op->pend_link, &object->pending_ops);
45130- fscache_stat(&fscache_n_op_pend);
45131+ fscache_stat_unchecked(&fscache_n_op_pend);
45132 ret = 0;
45133 } else if (object->state == FSCACHE_OBJECT_DYING ||
45134 object->state == FSCACHE_OBJECT_LC_DYING ||
45135 object->state == FSCACHE_OBJECT_WITHDRAWING) {
45136- fscache_stat(&fscache_n_op_rejected);
45137+ fscache_stat_unchecked(&fscache_n_op_rejected);
45138 ret = -ENOBUFS;
45139 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
45140 fscache_report_unexpected_submission(object, op, ostate);
45141@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
45142
45143 ret = -EBUSY;
45144 if (!list_empty(&op->pend_link)) {
45145- fscache_stat(&fscache_n_op_cancelled);
45146+ fscache_stat_unchecked(&fscache_n_op_cancelled);
45147 list_del_init(&op->pend_link);
45148 object->n_ops--;
45149 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
45150@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
45151 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
45152 BUG();
45153
45154- fscache_stat(&fscache_n_op_release);
45155+ fscache_stat_unchecked(&fscache_n_op_release);
45156
45157 if (op->release) {
45158 op->release(op);
45159@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
45160 * lock, and defer it otherwise */
45161 if (!spin_trylock(&object->lock)) {
45162 _debug("defer put");
45163- fscache_stat(&fscache_n_op_deferred_release);
45164+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
45165
45166 cache = object->cache;
45167 spin_lock(&cache->op_gc_list_lock);
45168@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
45169
45170 _debug("GC DEFERRED REL OBJ%x OP%x",
45171 object->debug_id, op->debug_id);
45172- fscache_stat(&fscache_n_op_gc);
45173+ fscache_stat_unchecked(&fscache_n_op_gc);
45174
45175 ASSERTCMP(atomic_read(&op->usage), ==, 0);
45176
45177diff --git a/fs/fscache/page.c b/fs/fscache/page.c
45178index 3f7a59b..cf196cc 100644
45179--- a/fs/fscache/page.c
45180+++ b/fs/fscache/page.c
45181@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45182 val = radix_tree_lookup(&cookie->stores, page->index);
45183 if (!val) {
45184 rcu_read_unlock();
45185- fscache_stat(&fscache_n_store_vmscan_not_storing);
45186+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
45187 __fscache_uncache_page(cookie, page);
45188 return true;
45189 }
45190@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45191 spin_unlock(&cookie->stores_lock);
45192
45193 if (xpage) {
45194- fscache_stat(&fscache_n_store_vmscan_cancelled);
45195- fscache_stat(&fscache_n_store_radix_deletes);
45196+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
45197+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45198 ASSERTCMP(xpage, ==, page);
45199 } else {
45200- fscache_stat(&fscache_n_store_vmscan_gone);
45201+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
45202 }
45203
45204 wake_up_bit(&cookie->flags, 0);
45205@@ -107,7 +107,7 @@ page_busy:
45206 /* we might want to wait here, but that could deadlock the allocator as
45207 * the work threads writing to the cache may all end up sleeping
45208 * on memory allocation */
45209- fscache_stat(&fscache_n_store_vmscan_busy);
45210+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
45211 return false;
45212 }
45213 EXPORT_SYMBOL(__fscache_maybe_release_page);
45214@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
45215 FSCACHE_COOKIE_STORING_TAG);
45216 if (!radix_tree_tag_get(&cookie->stores, page->index,
45217 FSCACHE_COOKIE_PENDING_TAG)) {
45218- fscache_stat(&fscache_n_store_radix_deletes);
45219+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45220 xpage = radix_tree_delete(&cookie->stores, page->index);
45221 }
45222 spin_unlock(&cookie->stores_lock);
45223@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
45224
45225 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
45226
45227- fscache_stat(&fscache_n_attr_changed_calls);
45228+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
45229
45230 if (fscache_object_is_active(object)) {
45231 fscache_stat(&fscache_n_cop_attr_changed);
45232@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45233
45234 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45235
45236- fscache_stat(&fscache_n_attr_changed);
45237+ fscache_stat_unchecked(&fscache_n_attr_changed);
45238
45239 op = kzalloc(sizeof(*op), GFP_KERNEL);
45240 if (!op) {
45241- fscache_stat(&fscache_n_attr_changed_nomem);
45242+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
45243 _leave(" = -ENOMEM");
45244 return -ENOMEM;
45245 }
45246@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45247 if (fscache_submit_exclusive_op(object, op) < 0)
45248 goto nobufs;
45249 spin_unlock(&cookie->lock);
45250- fscache_stat(&fscache_n_attr_changed_ok);
45251+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
45252 fscache_put_operation(op);
45253 _leave(" = 0");
45254 return 0;
45255@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45256 nobufs:
45257 spin_unlock(&cookie->lock);
45258 kfree(op);
45259- fscache_stat(&fscache_n_attr_changed_nobufs);
45260+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
45261 _leave(" = %d", -ENOBUFS);
45262 return -ENOBUFS;
45263 }
45264@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
45265 /* allocate a retrieval operation and attempt to submit it */
45266 op = kzalloc(sizeof(*op), GFP_NOIO);
45267 if (!op) {
45268- fscache_stat(&fscache_n_retrievals_nomem);
45269+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45270 return NULL;
45271 }
45272
45273@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45274 return 0;
45275 }
45276
45277- fscache_stat(&fscache_n_retrievals_wait);
45278+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
45279
45280 jif = jiffies;
45281 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
45282 fscache_wait_bit_interruptible,
45283 TASK_INTERRUPTIBLE) != 0) {
45284- fscache_stat(&fscache_n_retrievals_intr);
45285+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
45286 _leave(" = -ERESTARTSYS");
45287 return -ERESTARTSYS;
45288 }
45289@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45290 */
45291 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45292 struct fscache_retrieval *op,
45293- atomic_t *stat_op_waits,
45294- atomic_t *stat_object_dead)
45295+ atomic_unchecked_t *stat_op_waits,
45296+ atomic_unchecked_t *stat_object_dead)
45297 {
45298 int ret;
45299
45300@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45301 goto check_if_dead;
45302
45303 _debug(">>> WT");
45304- fscache_stat(stat_op_waits);
45305+ fscache_stat_unchecked(stat_op_waits);
45306 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
45307 fscache_wait_bit_interruptible,
45308 TASK_INTERRUPTIBLE) < 0) {
45309@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45310
45311 check_if_dead:
45312 if (unlikely(fscache_object_is_dead(object))) {
45313- fscache_stat(stat_object_dead);
45314+ fscache_stat_unchecked(stat_object_dead);
45315 return -ENOBUFS;
45316 }
45317 return 0;
45318@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45319
45320 _enter("%p,%p,,,", cookie, page);
45321
45322- fscache_stat(&fscache_n_retrievals);
45323+ fscache_stat_unchecked(&fscache_n_retrievals);
45324
45325 if (hlist_empty(&cookie->backing_objects))
45326 goto nobufs;
45327@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45328 goto nobufs_unlock;
45329 spin_unlock(&cookie->lock);
45330
45331- fscache_stat(&fscache_n_retrieval_ops);
45332+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
45333
45334 /* pin the netfs read context in case we need to do the actual netfs
45335 * read because we've encountered a cache read failure */
45336@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45337
45338 error:
45339 if (ret == -ENOMEM)
45340- fscache_stat(&fscache_n_retrievals_nomem);
45341+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45342 else if (ret == -ERESTARTSYS)
45343- fscache_stat(&fscache_n_retrievals_intr);
45344+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
45345 else if (ret == -ENODATA)
45346- fscache_stat(&fscache_n_retrievals_nodata);
45347+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45348 else if (ret < 0)
45349- fscache_stat(&fscache_n_retrievals_nobufs);
45350+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45351 else
45352- fscache_stat(&fscache_n_retrievals_ok);
45353+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
45354
45355 fscache_put_retrieval(op);
45356 _leave(" = %d", ret);
45357@@ -429,7 +429,7 @@ nobufs_unlock:
45358 spin_unlock(&cookie->lock);
45359 kfree(op);
45360 nobufs:
45361- fscache_stat(&fscache_n_retrievals_nobufs);
45362+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45363 _leave(" = -ENOBUFS");
45364 return -ENOBUFS;
45365 }
45366@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45367
45368 _enter("%p,,%d,,,", cookie, *nr_pages);
45369
45370- fscache_stat(&fscache_n_retrievals);
45371+ fscache_stat_unchecked(&fscache_n_retrievals);
45372
45373 if (hlist_empty(&cookie->backing_objects))
45374 goto nobufs;
45375@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45376 goto nobufs_unlock;
45377 spin_unlock(&cookie->lock);
45378
45379- fscache_stat(&fscache_n_retrieval_ops);
45380+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
45381
45382 /* pin the netfs read context in case we need to do the actual netfs
45383 * read because we've encountered a cache read failure */
45384@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45385
45386 error:
45387 if (ret == -ENOMEM)
45388- fscache_stat(&fscache_n_retrievals_nomem);
45389+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45390 else if (ret == -ERESTARTSYS)
45391- fscache_stat(&fscache_n_retrievals_intr);
45392+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
45393 else if (ret == -ENODATA)
45394- fscache_stat(&fscache_n_retrievals_nodata);
45395+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45396 else if (ret < 0)
45397- fscache_stat(&fscache_n_retrievals_nobufs);
45398+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45399 else
45400- fscache_stat(&fscache_n_retrievals_ok);
45401+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
45402
45403 fscache_put_retrieval(op);
45404 _leave(" = %d", ret);
45405@@ -545,7 +545,7 @@ nobufs_unlock:
45406 spin_unlock(&cookie->lock);
45407 kfree(op);
45408 nobufs:
45409- fscache_stat(&fscache_n_retrievals_nobufs);
45410+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45411 _leave(" = -ENOBUFS");
45412 return -ENOBUFS;
45413 }
45414@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45415
45416 _enter("%p,%p,,,", cookie, page);
45417
45418- fscache_stat(&fscache_n_allocs);
45419+ fscache_stat_unchecked(&fscache_n_allocs);
45420
45421 if (hlist_empty(&cookie->backing_objects))
45422 goto nobufs;
45423@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45424 goto nobufs_unlock;
45425 spin_unlock(&cookie->lock);
45426
45427- fscache_stat(&fscache_n_alloc_ops);
45428+ fscache_stat_unchecked(&fscache_n_alloc_ops);
45429
45430 ret = fscache_wait_for_retrieval_activation(
45431 object, op,
45432@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45433
45434 error:
45435 if (ret == -ERESTARTSYS)
45436- fscache_stat(&fscache_n_allocs_intr);
45437+ fscache_stat_unchecked(&fscache_n_allocs_intr);
45438 else if (ret < 0)
45439- fscache_stat(&fscache_n_allocs_nobufs);
45440+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45441 else
45442- fscache_stat(&fscache_n_allocs_ok);
45443+ fscache_stat_unchecked(&fscache_n_allocs_ok);
45444
45445 fscache_put_retrieval(op);
45446 _leave(" = %d", ret);
45447@@ -625,7 +625,7 @@ nobufs_unlock:
45448 spin_unlock(&cookie->lock);
45449 kfree(op);
45450 nobufs:
45451- fscache_stat(&fscache_n_allocs_nobufs);
45452+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45453 _leave(" = -ENOBUFS");
45454 return -ENOBUFS;
45455 }
45456@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45457
45458 spin_lock(&cookie->stores_lock);
45459
45460- fscache_stat(&fscache_n_store_calls);
45461+ fscache_stat_unchecked(&fscache_n_store_calls);
45462
45463 /* find a page to store */
45464 page = NULL;
45465@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45466 page = results[0];
45467 _debug("gang %d [%lx]", n, page->index);
45468 if (page->index > op->store_limit) {
45469- fscache_stat(&fscache_n_store_pages_over_limit);
45470+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
45471 goto superseded;
45472 }
45473
45474@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45475 spin_unlock(&cookie->stores_lock);
45476 spin_unlock(&object->lock);
45477
45478- fscache_stat(&fscache_n_store_pages);
45479+ fscache_stat_unchecked(&fscache_n_store_pages);
45480 fscache_stat(&fscache_n_cop_write_page);
45481 ret = object->cache->ops->write_page(op, page);
45482 fscache_stat_d(&fscache_n_cop_write_page);
45483@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45484 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45485 ASSERT(PageFsCache(page));
45486
45487- fscache_stat(&fscache_n_stores);
45488+ fscache_stat_unchecked(&fscache_n_stores);
45489
45490 op = kzalloc(sizeof(*op), GFP_NOIO);
45491 if (!op)
45492@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45493 spin_unlock(&cookie->stores_lock);
45494 spin_unlock(&object->lock);
45495
45496- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
45497+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
45498 op->store_limit = object->store_limit;
45499
45500 if (fscache_submit_op(object, &op->op) < 0)
45501@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45502
45503 spin_unlock(&cookie->lock);
45504 radix_tree_preload_end();
45505- fscache_stat(&fscache_n_store_ops);
45506- fscache_stat(&fscache_n_stores_ok);
45507+ fscache_stat_unchecked(&fscache_n_store_ops);
45508+ fscache_stat_unchecked(&fscache_n_stores_ok);
45509
45510 /* the work queue now carries its own ref on the object */
45511 fscache_put_operation(&op->op);
45512@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45513 return 0;
45514
45515 already_queued:
45516- fscache_stat(&fscache_n_stores_again);
45517+ fscache_stat_unchecked(&fscache_n_stores_again);
45518 already_pending:
45519 spin_unlock(&cookie->stores_lock);
45520 spin_unlock(&object->lock);
45521 spin_unlock(&cookie->lock);
45522 radix_tree_preload_end();
45523 kfree(op);
45524- fscache_stat(&fscache_n_stores_ok);
45525+ fscache_stat_unchecked(&fscache_n_stores_ok);
45526 _leave(" = 0");
45527 return 0;
45528
45529@@ -851,14 +851,14 @@ nobufs:
45530 spin_unlock(&cookie->lock);
45531 radix_tree_preload_end();
45532 kfree(op);
45533- fscache_stat(&fscache_n_stores_nobufs);
45534+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
45535 _leave(" = -ENOBUFS");
45536 return -ENOBUFS;
45537
45538 nomem_free:
45539 kfree(op);
45540 nomem:
45541- fscache_stat(&fscache_n_stores_oom);
45542+ fscache_stat_unchecked(&fscache_n_stores_oom);
45543 _leave(" = -ENOMEM");
45544 return -ENOMEM;
45545 }
45546@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
45547 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45548 ASSERTCMP(page, !=, NULL);
45549
45550- fscache_stat(&fscache_n_uncaches);
45551+ fscache_stat_unchecked(&fscache_n_uncaches);
45552
45553 /* cache withdrawal may beat us to it */
45554 if (!PageFsCache(page))
45555@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
45556 unsigned long loop;
45557
45558 #ifdef CONFIG_FSCACHE_STATS
45559- atomic_add(pagevec->nr, &fscache_n_marks);
45560+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
45561 #endif
45562
45563 for (loop = 0; loop < pagevec->nr; loop++) {
45564diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
45565index 4765190..2a067f2 100644
45566--- a/fs/fscache/stats.c
45567+++ b/fs/fscache/stats.c
45568@@ -18,95 +18,95 @@
45569 /*
45570 * operation counters
45571 */
45572-atomic_t fscache_n_op_pend;
45573-atomic_t fscache_n_op_run;
45574-atomic_t fscache_n_op_enqueue;
45575-atomic_t fscache_n_op_requeue;
45576-atomic_t fscache_n_op_deferred_release;
45577-atomic_t fscache_n_op_release;
45578-atomic_t fscache_n_op_gc;
45579-atomic_t fscache_n_op_cancelled;
45580-atomic_t fscache_n_op_rejected;
45581+atomic_unchecked_t fscache_n_op_pend;
45582+atomic_unchecked_t fscache_n_op_run;
45583+atomic_unchecked_t fscache_n_op_enqueue;
45584+atomic_unchecked_t fscache_n_op_requeue;
45585+atomic_unchecked_t fscache_n_op_deferred_release;
45586+atomic_unchecked_t fscache_n_op_release;
45587+atomic_unchecked_t fscache_n_op_gc;
45588+atomic_unchecked_t fscache_n_op_cancelled;
45589+atomic_unchecked_t fscache_n_op_rejected;
45590
45591-atomic_t fscache_n_attr_changed;
45592-atomic_t fscache_n_attr_changed_ok;
45593-atomic_t fscache_n_attr_changed_nobufs;
45594-atomic_t fscache_n_attr_changed_nomem;
45595-atomic_t fscache_n_attr_changed_calls;
45596+atomic_unchecked_t fscache_n_attr_changed;
45597+atomic_unchecked_t fscache_n_attr_changed_ok;
45598+atomic_unchecked_t fscache_n_attr_changed_nobufs;
45599+atomic_unchecked_t fscache_n_attr_changed_nomem;
45600+atomic_unchecked_t fscache_n_attr_changed_calls;
45601
45602-atomic_t fscache_n_allocs;
45603-atomic_t fscache_n_allocs_ok;
45604-atomic_t fscache_n_allocs_wait;
45605-atomic_t fscache_n_allocs_nobufs;
45606-atomic_t fscache_n_allocs_intr;
45607-atomic_t fscache_n_allocs_object_dead;
45608-atomic_t fscache_n_alloc_ops;
45609-atomic_t fscache_n_alloc_op_waits;
45610+atomic_unchecked_t fscache_n_allocs;
45611+atomic_unchecked_t fscache_n_allocs_ok;
45612+atomic_unchecked_t fscache_n_allocs_wait;
45613+atomic_unchecked_t fscache_n_allocs_nobufs;
45614+atomic_unchecked_t fscache_n_allocs_intr;
45615+atomic_unchecked_t fscache_n_allocs_object_dead;
45616+atomic_unchecked_t fscache_n_alloc_ops;
45617+atomic_unchecked_t fscache_n_alloc_op_waits;
45618
45619-atomic_t fscache_n_retrievals;
45620-atomic_t fscache_n_retrievals_ok;
45621-atomic_t fscache_n_retrievals_wait;
45622-atomic_t fscache_n_retrievals_nodata;
45623-atomic_t fscache_n_retrievals_nobufs;
45624-atomic_t fscache_n_retrievals_intr;
45625-atomic_t fscache_n_retrievals_nomem;
45626-atomic_t fscache_n_retrievals_object_dead;
45627-atomic_t fscache_n_retrieval_ops;
45628-atomic_t fscache_n_retrieval_op_waits;
45629+atomic_unchecked_t fscache_n_retrievals;
45630+atomic_unchecked_t fscache_n_retrievals_ok;
45631+atomic_unchecked_t fscache_n_retrievals_wait;
45632+atomic_unchecked_t fscache_n_retrievals_nodata;
45633+atomic_unchecked_t fscache_n_retrievals_nobufs;
45634+atomic_unchecked_t fscache_n_retrievals_intr;
45635+atomic_unchecked_t fscache_n_retrievals_nomem;
45636+atomic_unchecked_t fscache_n_retrievals_object_dead;
45637+atomic_unchecked_t fscache_n_retrieval_ops;
45638+atomic_unchecked_t fscache_n_retrieval_op_waits;
45639
45640-atomic_t fscache_n_stores;
45641-atomic_t fscache_n_stores_ok;
45642-atomic_t fscache_n_stores_again;
45643-atomic_t fscache_n_stores_nobufs;
45644-atomic_t fscache_n_stores_oom;
45645-atomic_t fscache_n_store_ops;
45646-atomic_t fscache_n_store_calls;
45647-atomic_t fscache_n_store_pages;
45648-atomic_t fscache_n_store_radix_deletes;
45649-atomic_t fscache_n_store_pages_over_limit;
45650+atomic_unchecked_t fscache_n_stores;
45651+atomic_unchecked_t fscache_n_stores_ok;
45652+atomic_unchecked_t fscache_n_stores_again;
45653+atomic_unchecked_t fscache_n_stores_nobufs;
45654+atomic_unchecked_t fscache_n_stores_oom;
45655+atomic_unchecked_t fscache_n_store_ops;
45656+atomic_unchecked_t fscache_n_store_calls;
45657+atomic_unchecked_t fscache_n_store_pages;
45658+atomic_unchecked_t fscache_n_store_radix_deletes;
45659+atomic_unchecked_t fscache_n_store_pages_over_limit;
45660
45661-atomic_t fscache_n_store_vmscan_not_storing;
45662-atomic_t fscache_n_store_vmscan_gone;
45663-atomic_t fscache_n_store_vmscan_busy;
45664-atomic_t fscache_n_store_vmscan_cancelled;
45665+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45666+atomic_unchecked_t fscache_n_store_vmscan_gone;
45667+atomic_unchecked_t fscache_n_store_vmscan_busy;
45668+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45669
45670-atomic_t fscache_n_marks;
45671-atomic_t fscache_n_uncaches;
45672+atomic_unchecked_t fscache_n_marks;
45673+atomic_unchecked_t fscache_n_uncaches;
45674
45675-atomic_t fscache_n_acquires;
45676-atomic_t fscache_n_acquires_null;
45677-atomic_t fscache_n_acquires_no_cache;
45678-atomic_t fscache_n_acquires_ok;
45679-atomic_t fscache_n_acquires_nobufs;
45680-atomic_t fscache_n_acquires_oom;
45681+atomic_unchecked_t fscache_n_acquires;
45682+atomic_unchecked_t fscache_n_acquires_null;
45683+atomic_unchecked_t fscache_n_acquires_no_cache;
45684+atomic_unchecked_t fscache_n_acquires_ok;
45685+atomic_unchecked_t fscache_n_acquires_nobufs;
45686+atomic_unchecked_t fscache_n_acquires_oom;
45687
45688-atomic_t fscache_n_updates;
45689-atomic_t fscache_n_updates_null;
45690-atomic_t fscache_n_updates_run;
45691+atomic_unchecked_t fscache_n_updates;
45692+atomic_unchecked_t fscache_n_updates_null;
45693+atomic_unchecked_t fscache_n_updates_run;
45694
45695-atomic_t fscache_n_relinquishes;
45696-atomic_t fscache_n_relinquishes_null;
45697-atomic_t fscache_n_relinquishes_waitcrt;
45698-atomic_t fscache_n_relinquishes_retire;
45699+atomic_unchecked_t fscache_n_relinquishes;
45700+atomic_unchecked_t fscache_n_relinquishes_null;
45701+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45702+atomic_unchecked_t fscache_n_relinquishes_retire;
45703
45704-atomic_t fscache_n_cookie_index;
45705-atomic_t fscache_n_cookie_data;
45706-atomic_t fscache_n_cookie_special;
45707+atomic_unchecked_t fscache_n_cookie_index;
45708+atomic_unchecked_t fscache_n_cookie_data;
45709+atomic_unchecked_t fscache_n_cookie_special;
45710
45711-atomic_t fscache_n_object_alloc;
45712-atomic_t fscache_n_object_no_alloc;
45713-atomic_t fscache_n_object_lookups;
45714-atomic_t fscache_n_object_lookups_negative;
45715-atomic_t fscache_n_object_lookups_positive;
45716-atomic_t fscache_n_object_lookups_timed_out;
45717-atomic_t fscache_n_object_created;
45718-atomic_t fscache_n_object_avail;
45719-atomic_t fscache_n_object_dead;
45720+atomic_unchecked_t fscache_n_object_alloc;
45721+atomic_unchecked_t fscache_n_object_no_alloc;
45722+atomic_unchecked_t fscache_n_object_lookups;
45723+atomic_unchecked_t fscache_n_object_lookups_negative;
45724+atomic_unchecked_t fscache_n_object_lookups_positive;
45725+atomic_unchecked_t fscache_n_object_lookups_timed_out;
45726+atomic_unchecked_t fscache_n_object_created;
45727+atomic_unchecked_t fscache_n_object_avail;
45728+atomic_unchecked_t fscache_n_object_dead;
45729
45730-atomic_t fscache_n_checkaux_none;
45731-atomic_t fscache_n_checkaux_okay;
45732-atomic_t fscache_n_checkaux_update;
45733-atomic_t fscache_n_checkaux_obsolete;
45734+atomic_unchecked_t fscache_n_checkaux_none;
45735+atomic_unchecked_t fscache_n_checkaux_okay;
45736+atomic_unchecked_t fscache_n_checkaux_update;
45737+atomic_unchecked_t fscache_n_checkaux_obsolete;
45738
45739 atomic_t fscache_n_cop_alloc_object;
45740 atomic_t fscache_n_cop_lookup_object;
45741@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
45742 seq_puts(m, "FS-Cache statistics\n");
45743
45744 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
45745- atomic_read(&fscache_n_cookie_index),
45746- atomic_read(&fscache_n_cookie_data),
45747- atomic_read(&fscache_n_cookie_special));
45748+ atomic_read_unchecked(&fscache_n_cookie_index),
45749+ atomic_read_unchecked(&fscache_n_cookie_data),
45750+ atomic_read_unchecked(&fscache_n_cookie_special));
45751
45752 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
45753- atomic_read(&fscache_n_object_alloc),
45754- atomic_read(&fscache_n_object_no_alloc),
45755- atomic_read(&fscache_n_object_avail),
45756- atomic_read(&fscache_n_object_dead));
45757+ atomic_read_unchecked(&fscache_n_object_alloc),
45758+ atomic_read_unchecked(&fscache_n_object_no_alloc),
45759+ atomic_read_unchecked(&fscache_n_object_avail),
45760+ atomic_read_unchecked(&fscache_n_object_dead));
45761 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
45762- atomic_read(&fscache_n_checkaux_none),
45763- atomic_read(&fscache_n_checkaux_okay),
45764- atomic_read(&fscache_n_checkaux_update),
45765- atomic_read(&fscache_n_checkaux_obsolete));
45766+ atomic_read_unchecked(&fscache_n_checkaux_none),
45767+ atomic_read_unchecked(&fscache_n_checkaux_okay),
45768+ atomic_read_unchecked(&fscache_n_checkaux_update),
45769+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
45770
45771 seq_printf(m, "Pages : mrk=%u unc=%u\n",
45772- atomic_read(&fscache_n_marks),
45773- atomic_read(&fscache_n_uncaches));
45774+ atomic_read_unchecked(&fscache_n_marks),
45775+ atomic_read_unchecked(&fscache_n_uncaches));
45776
45777 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
45778 " oom=%u\n",
45779- atomic_read(&fscache_n_acquires),
45780- atomic_read(&fscache_n_acquires_null),
45781- atomic_read(&fscache_n_acquires_no_cache),
45782- atomic_read(&fscache_n_acquires_ok),
45783- atomic_read(&fscache_n_acquires_nobufs),
45784- atomic_read(&fscache_n_acquires_oom));
45785+ atomic_read_unchecked(&fscache_n_acquires),
45786+ atomic_read_unchecked(&fscache_n_acquires_null),
45787+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
45788+ atomic_read_unchecked(&fscache_n_acquires_ok),
45789+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
45790+ atomic_read_unchecked(&fscache_n_acquires_oom));
45791
45792 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
45793- atomic_read(&fscache_n_object_lookups),
45794- atomic_read(&fscache_n_object_lookups_negative),
45795- atomic_read(&fscache_n_object_lookups_positive),
45796- atomic_read(&fscache_n_object_created),
45797- atomic_read(&fscache_n_object_lookups_timed_out));
45798+ atomic_read_unchecked(&fscache_n_object_lookups),
45799+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
45800+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
45801+ atomic_read_unchecked(&fscache_n_object_created),
45802+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
45803
45804 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
45805- atomic_read(&fscache_n_updates),
45806- atomic_read(&fscache_n_updates_null),
45807- atomic_read(&fscache_n_updates_run));
45808+ atomic_read_unchecked(&fscache_n_updates),
45809+ atomic_read_unchecked(&fscache_n_updates_null),
45810+ atomic_read_unchecked(&fscache_n_updates_run));
45811
45812 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
45813- atomic_read(&fscache_n_relinquishes),
45814- atomic_read(&fscache_n_relinquishes_null),
45815- atomic_read(&fscache_n_relinquishes_waitcrt),
45816- atomic_read(&fscache_n_relinquishes_retire));
45817+ atomic_read_unchecked(&fscache_n_relinquishes),
45818+ atomic_read_unchecked(&fscache_n_relinquishes_null),
45819+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
45820+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
45821
45822 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
45823- atomic_read(&fscache_n_attr_changed),
45824- atomic_read(&fscache_n_attr_changed_ok),
45825- atomic_read(&fscache_n_attr_changed_nobufs),
45826- atomic_read(&fscache_n_attr_changed_nomem),
45827- atomic_read(&fscache_n_attr_changed_calls));
45828+ atomic_read_unchecked(&fscache_n_attr_changed),
45829+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
45830+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
45831+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
45832+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
45833
45834 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
45835- atomic_read(&fscache_n_allocs),
45836- atomic_read(&fscache_n_allocs_ok),
45837- atomic_read(&fscache_n_allocs_wait),
45838- atomic_read(&fscache_n_allocs_nobufs),
45839- atomic_read(&fscache_n_allocs_intr));
45840+ atomic_read_unchecked(&fscache_n_allocs),
45841+ atomic_read_unchecked(&fscache_n_allocs_ok),
45842+ atomic_read_unchecked(&fscache_n_allocs_wait),
45843+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
45844+ atomic_read_unchecked(&fscache_n_allocs_intr));
45845 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
45846- atomic_read(&fscache_n_alloc_ops),
45847- atomic_read(&fscache_n_alloc_op_waits),
45848- atomic_read(&fscache_n_allocs_object_dead));
45849+ atomic_read_unchecked(&fscache_n_alloc_ops),
45850+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
45851+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
45852
45853 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
45854 " int=%u oom=%u\n",
45855- atomic_read(&fscache_n_retrievals),
45856- atomic_read(&fscache_n_retrievals_ok),
45857- atomic_read(&fscache_n_retrievals_wait),
45858- atomic_read(&fscache_n_retrievals_nodata),
45859- atomic_read(&fscache_n_retrievals_nobufs),
45860- atomic_read(&fscache_n_retrievals_intr),
45861- atomic_read(&fscache_n_retrievals_nomem));
45862+ atomic_read_unchecked(&fscache_n_retrievals),
45863+ atomic_read_unchecked(&fscache_n_retrievals_ok),
45864+ atomic_read_unchecked(&fscache_n_retrievals_wait),
45865+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
45866+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
45867+ atomic_read_unchecked(&fscache_n_retrievals_intr),
45868+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
45869 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
45870- atomic_read(&fscache_n_retrieval_ops),
45871- atomic_read(&fscache_n_retrieval_op_waits),
45872- atomic_read(&fscache_n_retrievals_object_dead));
45873+ atomic_read_unchecked(&fscache_n_retrieval_ops),
45874+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
45875+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
45876
45877 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
45878- atomic_read(&fscache_n_stores),
45879- atomic_read(&fscache_n_stores_ok),
45880- atomic_read(&fscache_n_stores_again),
45881- atomic_read(&fscache_n_stores_nobufs),
45882- atomic_read(&fscache_n_stores_oom));
45883+ atomic_read_unchecked(&fscache_n_stores),
45884+ atomic_read_unchecked(&fscache_n_stores_ok),
45885+ atomic_read_unchecked(&fscache_n_stores_again),
45886+ atomic_read_unchecked(&fscache_n_stores_nobufs),
45887+ atomic_read_unchecked(&fscache_n_stores_oom));
45888 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
45889- atomic_read(&fscache_n_store_ops),
45890- atomic_read(&fscache_n_store_calls),
45891- atomic_read(&fscache_n_store_pages),
45892- atomic_read(&fscache_n_store_radix_deletes),
45893- atomic_read(&fscache_n_store_pages_over_limit));
45894+ atomic_read_unchecked(&fscache_n_store_ops),
45895+ atomic_read_unchecked(&fscache_n_store_calls),
45896+ atomic_read_unchecked(&fscache_n_store_pages),
45897+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
45898+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
45899
45900 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
45901- atomic_read(&fscache_n_store_vmscan_not_storing),
45902- atomic_read(&fscache_n_store_vmscan_gone),
45903- atomic_read(&fscache_n_store_vmscan_busy),
45904- atomic_read(&fscache_n_store_vmscan_cancelled));
45905+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
45906+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
45907+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
45908+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
45909
45910 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
45911- atomic_read(&fscache_n_op_pend),
45912- atomic_read(&fscache_n_op_run),
45913- atomic_read(&fscache_n_op_enqueue),
45914- atomic_read(&fscache_n_op_cancelled),
45915- atomic_read(&fscache_n_op_rejected));
45916+ atomic_read_unchecked(&fscache_n_op_pend),
45917+ atomic_read_unchecked(&fscache_n_op_run),
45918+ atomic_read_unchecked(&fscache_n_op_enqueue),
45919+ atomic_read_unchecked(&fscache_n_op_cancelled),
45920+ atomic_read_unchecked(&fscache_n_op_rejected));
45921 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
45922- atomic_read(&fscache_n_op_deferred_release),
45923- atomic_read(&fscache_n_op_release),
45924- atomic_read(&fscache_n_op_gc));
45925+ atomic_read_unchecked(&fscache_n_op_deferred_release),
45926+ atomic_read_unchecked(&fscache_n_op_release),
45927+ atomic_read_unchecked(&fscache_n_op_gc));
45928
45929 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
45930 atomic_read(&fscache_n_cop_alloc_object),
45931diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
45932index 3426521..3b75162 100644
45933--- a/fs/fuse/cuse.c
45934+++ b/fs/fuse/cuse.c
45935@@ -587,10 +587,12 @@ static int __init cuse_init(void)
45936 INIT_LIST_HEAD(&cuse_conntbl[i]);
45937
45938 /* inherit and extend fuse_dev_operations */
45939- cuse_channel_fops = fuse_dev_operations;
45940- cuse_channel_fops.owner = THIS_MODULE;
45941- cuse_channel_fops.open = cuse_channel_open;
45942- cuse_channel_fops.release = cuse_channel_release;
45943+ pax_open_kernel();
45944+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
45945+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
45946+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
45947+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
45948+ pax_close_kernel();
45949
45950 cuse_class = class_create(THIS_MODULE, "cuse");
45951 if (IS_ERR(cuse_class))
45952diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
45953index 7df2b5e..5804aa7 100644
45954--- a/fs/fuse/dev.c
45955+++ b/fs/fuse/dev.c
45956@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
45957 ret = 0;
45958 pipe_lock(pipe);
45959
45960- if (!pipe->readers) {
45961+ if (!atomic_read(&pipe->readers)) {
45962 send_sig(SIGPIPE, current, 0);
45963 if (!ret)
45964 ret = -EPIPE;
45965diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
45966index bc43832..0cfe5a6 100644
45967--- a/fs/fuse/dir.c
45968+++ b/fs/fuse/dir.c
45969@@ -1181,7 +1181,7 @@ static char *read_link(struct dentry *dentry)
45970 return link;
45971 }
45972
45973-static void free_link(char *link)
45974+static void free_link(const char *link)
45975 {
45976 if (!IS_ERR(link))
45977 free_page((unsigned long) link);
45978diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
45979index a9ba244..d9df391 100644
45980--- a/fs/gfs2/inode.c
45981+++ b/fs/gfs2/inode.c
45982@@ -1496,7 +1496,7 @@ out:
45983
45984 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
45985 {
45986- char *s = nd_get_link(nd);
45987+ const char *s = nd_get_link(nd);
45988 if (!IS_ERR(s))
45989 kfree(s);
45990 }
45991diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
45992index 001ef01..f7d5f07 100644
45993--- a/fs/hugetlbfs/inode.c
45994+++ b/fs/hugetlbfs/inode.c
45995@@ -920,7 +920,7 @@ static struct file_system_type hugetlbfs_fs_type = {
45996 .kill_sb = kill_litter_super,
45997 };
45998
45999-static struct vfsmount *hugetlbfs_vfsmount;
46000+struct vfsmount *hugetlbfs_vfsmount;
46001
46002 static int can_do_hugetlb_shm(void)
46003 {
46004diff --git a/fs/inode.c b/fs/inode.c
46005index 9f4f5fe..6214688 100644
46006--- a/fs/inode.c
46007+++ b/fs/inode.c
46008@@ -860,8 +860,8 @@ unsigned int get_next_ino(void)
46009
46010 #ifdef CONFIG_SMP
46011 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
46012- static atomic_t shared_last_ino;
46013- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
46014+ static atomic_unchecked_t shared_last_ino;
46015+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
46016
46017 res = next - LAST_INO_BATCH;
46018 }
46019diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
46020index 4a6cf28..d3a29d3 100644
46021--- a/fs/jffs2/erase.c
46022+++ b/fs/jffs2/erase.c
46023@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
46024 struct jffs2_unknown_node marker = {
46025 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
46026 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46027- .totlen = cpu_to_je32(c->cleanmarker_size)
46028+ .totlen = cpu_to_je32(c->cleanmarker_size),
46029+ .hdr_crc = cpu_to_je32(0)
46030 };
46031
46032 jffs2_prealloc_raw_node_refs(c, jeb, 1);
46033diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
46034index 74d9be1..d5dd140 100644
46035--- a/fs/jffs2/wbuf.c
46036+++ b/fs/jffs2/wbuf.c
46037@@ -1022,7 +1022,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
46038 {
46039 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
46040 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46041- .totlen = constant_cpu_to_je32(8)
46042+ .totlen = constant_cpu_to_je32(8),
46043+ .hdr_crc = constant_cpu_to_je32(0)
46044 };
46045
46046 /*
46047diff --git a/fs/jfs/super.c b/fs/jfs/super.c
46048index 4a82950..bcaa0cb 100644
46049--- a/fs/jfs/super.c
46050+++ b/fs/jfs/super.c
46051@@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
46052
46053 jfs_inode_cachep =
46054 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
46055- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
46056+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
46057 init_once);
46058 if (jfs_inode_cachep == NULL)
46059 return -ENOMEM;
46060diff --git a/fs/libfs.c b/fs/libfs.c
46061index 18d08f5..fe3dc64 100644
46062--- a/fs/libfs.c
46063+++ b/fs/libfs.c
46064@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46065
46066 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
46067 struct dentry *next;
46068+ char d_name[sizeof(next->d_iname)];
46069+ const unsigned char *name;
46070+
46071 next = list_entry(p, struct dentry, d_u.d_child);
46072 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
46073 if (!simple_positive(next)) {
46074@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46075
46076 spin_unlock(&next->d_lock);
46077 spin_unlock(&dentry->d_lock);
46078- if (filldir(dirent, next->d_name.name,
46079+ name = next->d_name.name;
46080+ if (name == next->d_iname) {
46081+ memcpy(d_name, name, next->d_name.len);
46082+ name = d_name;
46083+ }
46084+ if (filldir(dirent, name,
46085 next->d_name.len, filp->f_pos,
46086 next->d_inode->i_ino,
46087 dt_type(next->d_inode)) < 0)
46088diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
46089index 8392cb8..80d6193 100644
46090--- a/fs/lockd/clntproc.c
46091+++ b/fs/lockd/clntproc.c
46092@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
46093 /*
46094 * Cookie counter for NLM requests
46095 */
46096-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
46097+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
46098
46099 void nlmclnt_next_cookie(struct nlm_cookie *c)
46100 {
46101- u32 cookie = atomic_inc_return(&nlm_cookie);
46102+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
46103
46104 memcpy(c->data, &cookie, 4);
46105 c->len=4;
46106diff --git a/fs/locks.c b/fs/locks.c
46107index 0d68f1f..f216b79 100644
46108--- a/fs/locks.c
46109+++ b/fs/locks.c
46110@@ -2075,16 +2075,16 @@ void locks_remove_flock(struct file *filp)
46111 return;
46112
46113 if (filp->f_op && filp->f_op->flock) {
46114- struct file_lock fl = {
46115+ struct file_lock flock = {
46116 .fl_pid = current->tgid,
46117 .fl_file = filp,
46118 .fl_flags = FL_FLOCK,
46119 .fl_type = F_UNLCK,
46120 .fl_end = OFFSET_MAX,
46121 };
46122- filp->f_op->flock(filp, F_SETLKW, &fl);
46123- if (fl.fl_ops && fl.fl_ops->fl_release_private)
46124- fl.fl_ops->fl_release_private(&fl);
46125+ filp->f_op->flock(filp, F_SETLKW, &flock);
46126+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
46127+ flock.fl_ops->fl_release_private(&flock);
46128 }
46129
46130 lock_flocks();
46131diff --git a/fs/namei.c b/fs/namei.c
46132index c427919..e37fd3f 100644
46133--- a/fs/namei.c
46134+++ b/fs/namei.c
46135@@ -278,16 +278,32 @@ int generic_permission(struct inode *inode, int mask)
46136 if (ret != -EACCES)
46137 return ret;
46138
46139+#ifdef CONFIG_GRKERNSEC
46140+ /* we'll block if we have to log due to a denied capability use */
46141+ if (mask & MAY_NOT_BLOCK)
46142+ return -ECHILD;
46143+#endif
46144+
46145 if (S_ISDIR(inode->i_mode)) {
46146 /* DACs are overridable for directories */
46147- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46148- return 0;
46149 if (!(mask & MAY_WRITE))
46150- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46151+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46152+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46153 return 0;
46154+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46155+ return 0;
46156 return -EACCES;
46157 }
46158 /*
46159+ * Searching includes executable on directories, else just read.
46160+ */
46161+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46162+ if (mask == MAY_READ)
46163+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46164+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46165+ return 0;
46166+
46167+ /*
46168 * Read/write DACs are always overridable.
46169 * Executable DACs are overridable when there is
46170 * at least one exec bit set.
46171@@ -296,14 +312,6 @@ int generic_permission(struct inode *inode, int mask)
46172 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46173 return 0;
46174
46175- /*
46176- * Searching includes executable on directories, else just read.
46177- */
46178- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46179- if (mask == MAY_READ)
46180- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46181- return 0;
46182-
46183 return -EACCES;
46184 }
46185
46186@@ -652,11 +660,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
46187 return error;
46188 }
46189
46190+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
46191+ dentry->d_inode, dentry, nd->path.mnt)) {
46192+ error = -EACCES;
46193+ *p = ERR_PTR(error); /* no ->put_link(), please */
46194+ path_put(&nd->path);
46195+ return error;
46196+ }
46197+
46198 nd->last_type = LAST_BIND;
46199 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
46200 error = PTR_ERR(*p);
46201 if (!IS_ERR(*p)) {
46202- char *s = nd_get_link(nd);
46203+ const char *s = nd_get_link(nd);
46204 error = 0;
46205 if (s)
46206 error = __vfs_follow_link(nd, s);
46207@@ -1753,6 +1769,21 @@ static int path_lookupat(int dfd, const char *name,
46208 if (!err)
46209 err = complete_walk(nd);
46210
46211+ if (!(nd->flags & LOOKUP_PARENT)) {
46212+#ifdef CONFIG_GRKERNSEC
46213+ if (flags & LOOKUP_RCU) {
46214+ if (!err)
46215+ path_put(&nd->path);
46216+ err = -ECHILD;
46217+ } else
46218+#endif
46219+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46220+ if (!err)
46221+ path_put(&nd->path);
46222+ err = -ENOENT;
46223+ }
46224+ }
46225+
46226 if (!err && nd->flags & LOOKUP_DIRECTORY) {
46227 if (!nd->inode->i_op->lookup) {
46228 path_put(&nd->path);
46229@@ -1780,6 +1811,15 @@ static int do_path_lookup(int dfd, const char *name,
46230 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
46231
46232 if (likely(!retval)) {
46233+ if (*name != '/' && nd->path.dentry && nd->inode) {
46234+#ifdef CONFIG_GRKERNSEC
46235+ if (flags & LOOKUP_RCU)
46236+ return -ECHILD;
46237+#endif
46238+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
46239+ return -ENOENT;
46240+ }
46241+
46242 if (unlikely(!audit_dummy_context())) {
46243 if (nd->path.dentry && nd->inode)
46244 audit_inode(name, nd->path.dentry);
46245@@ -2126,6 +2166,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
46246 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
46247 return -EPERM;
46248
46249+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
46250+ return -EPERM;
46251+ if (gr_handle_rawio(inode))
46252+ return -EPERM;
46253+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
46254+ return -EACCES;
46255+
46256 return 0;
46257 }
46258
46259@@ -2187,6 +2234,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46260 error = complete_walk(nd);
46261 if (error)
46262 return ERR_PTR(error);
46263+#ifdef CONFIG_GRKERNSEC
46264+ if (nd->flags & LOOKUP_RCU) {
46265+ error = -ECHILD;
46266+ goto exit;
46267+ }
46268+#endif
46269+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46270+ error = -ENOENT;
46271+ goto exit;
46272+ }
46273 audit_inode(pathname, nd->path.dentry);
46274 if (open_flag & O_CREAT) {
46275 error = -EISDIR;
46276@@ -2197,6 +2254,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46277 error = complete_walk(nd);
46278 if (error)
46279 return ERR_PTR(error);
46280+#ifdef CONFIG_GRKERNSEC
46281+ if (nd->flags & LOOKUP_RCU) {
46282+ error = -ECHILD;
46283+ goto exit;
46284+ }
46285+#endif
46286+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
46287+ error = -ENOENT;
46288+ goto exit;
46289+ }
46290 audit_inode(pathname, dir);
46291 goto ok;
46292 }
46293@@ -2218,6 +2285,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46294 error = complete_walk(nd);
46295 if (error)
46296 return ERR_PTR(error);
46297+#ifdef CONFIG_GRKERNSEC
46298+ if (nd->flags & LOOKUP_RCU) {
46299+ error = -ECHILD;
46300+ goto exit;
46301+ }
46302+#endif
46303+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46304+ error = -ENOENT;
46305+ goto exit;
46306+ }
46307
46308 error = -ENOTDIR;
46309 if (nd->flags & LOOKUP_DIRECTORY) {
46310@@ -2258,6 +2335,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46311 /* Negative dentry, just create the file */
46312 if (!dentry->d_inode) {
46313 umode_t mode = op->mode;
46314+
46315+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
46316+ error = -EACCES;
46317+ goto exit_mutex_unlock;
46318+ }
46319+
46320 if (!IS_POSIXACL(dir->d_inode))
46321 mode &= ~current_umask();
46322 /*
46323@@ -2281,6 +2364,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46324 error = vfs_create(dir->d_inode, dentry, mode, nd);
46325 if (error)
46326 goto exit_mutex_unlock;
46327+ else
46328+ gr_handle_create(path->dentry, path->mnt);
46329 mutex_unlock(&dir->d_inode->i_mutex);
46330 dput(nd->path.dentry);
46331 nd->path.dentry = dentry;
46332@@ -2290,6 +2375,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46333 /*
46334 * It already exists.
46335 */
46336+
46337+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
46338+ error = -ENOENT;
46339+ goto exit_mutex_unlock;
46340+ }
46341+
46342+ /* only check if O_CREAT is specified, all other checks need to go
46343+ into may_open */
46344+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
46345+ error = -EACCES;
46346+ goto exit_mutex_unlock;
46347+ }
46348+
46349 mutex_unlock(&dir->d_inode->i_mutex);
46350 audit_inode(pathname, path->dentry);
46351
46352@@ -2502,6 +2600,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
46353 *path = nd.path;
46354 return dentry;
46355 eexist:
46356+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
46357+ dput(dentry);
46358+ dentry = ERR_PTR(-ENOENT);
46359+ goto fail;
46360+ }
46361 dput(dentry);
46362 dentry = ERR_PTR(-EEXIST);
46363 fail:
46364@@ -2524,6 +2627,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
46365 }
46366 EXPORT_SYMBOL(user_path_create);
46367
46368+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
46369+{
46370+ char *tmp = getname(pathname);
46371+ struct dentry *res;
46372+ if (IS_ERR(tmp))
46373+ return ERR_CAST(tmp);
46374+ res = kern_path_create(dfd, tmp, path, is_dir);
46375+ if (IS_ERR(res))
46376+ putname(tmp);
46377+ else
46378+ *to = tmp;
46379+ return res;
46380+}
46381+
46382 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
46383 {
46384 int error = may_create(dir, dentry);
46385@@ -2591,6 +2708,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46386 error = mnt_want_write(path.mnt);
46387 if (error)
46388 goto out_dput;
46389+
46390+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
46391+ error = -EPERM;
46392+ goto out_drop_write;
46393+ }
46394+
46395+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
46396+ error = -EACCES;
46397+ goto out_drop_write;
46398+ }
46399+
46400 error = security_path_mknod(&path, dentry, mode, dev);
46401 if (error)
46402 goto out_drop_write;
46403@@ -2608,6 +2736,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46404 }
46405 out_drop_write:
46406 mnt_drop_write(path.mnt);
46407+
46408+ if (!error)
46409+ gr_handle_create(dentry, path.mnt);
46410 out_dput:
46411 dput(dentry);
46412 mutex_unlock(&path.dentry->d_inode->i_mutex);
46413@@ -2661,12 +2792,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
46414 error = mnt_want_write(path.mnt);
46415 if (error)
46416 goto out_dput;
46417+
46418+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
46419+ error = -EACCES;
46420+ goto out_drop_write;
46421+ }
46422+
46423 error = security_path_mkdir(&path, dentry, mode);
46424 if (error)
46425 goto out_drop_write;
46426 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
46427 out_drop_write:
46428 mnt_drop_write(path.mnt);
46429+
46430+ if (!error)
46431+ gr_handle_create(dentry, path.mnt);
46432 out_dput:
46433 dput(dentry);
46434 mutex_unlock(&path.dentry->d_inode->i_mutex);
46435@@ -2746,6 +2886,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46436 char * name;
46437 struct dentry *dentry;
46438 struct nameidata nd;
46439+ ino_t saved_ino = 0;
46440+ dev_t saved_dev = 0;
46441
46442 error = user_path_parent(dfd, pathname, &nd, &name);
46443 if (error)
46444@@ -2774,6 +2916,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
46445 error = -ENOENT;
46446 goto exit3;
46447 }
46448+
46449+ saved_ino = dentry->d_inode->i_ino;
46450+ saved_dev = gr_get_dev_from_dentry(dentry);
46451+
46452+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
46453+ error = -EACCES;
46454+ goto exit3;
46455+ }
46456+
46457 error = mnt_want_write(nd.path.mnt);
46458 if (error)
46459 goto exit3;
46460@@ -2781,6 +2932,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46461 if (error)
46462 goto exit4;
46463 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
46464+ if (!error && (saved_dev || saved_ino))
46465+ gr_handle_delete(saved_ino, saved_dev);
46466 exit4:
46467 mnt_drop_write(nd.path.mnt);
46468 exit3:
46469@@ -2843,6 +2996,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46470 struct dentry *dentry;
46471 struct nameidata nd;
46472 struct inode *inode = NULL;
46473+ ino_t saved_ino = 0;
46474+ dev_t saved_dev = 0;
46475
46476 error = user_path_parent(dfd, pathname, &nd, &name);
46477 if (error)
46478@@ -2865,6 +3020,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46479 if (!inode)
46480 goto slashes;
46481 ihold(inode);
46482+
46483+ if (inode->i_nlink <= 1) {
46484+ saved_ino = inode->i_ino;
46485+ saved_dev = gr_get_dev_from_dentry(dentry);
46486+ }
46487+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
46488+ error = -EACCES;
46489+ goto exit2;
46490+ }
46491+
46492 error = mnt_want_write(nd.path.mnt);
46493 if (error)
46494 goto exit2;
46495@@ -2872,6 +3037,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46496 if (error)
46497 goto exit3;
46498 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
46499+ if (!error && (saved_ino || saved_dev))
46500+ gr_handle_delete(saved_ino, saved_dev);
46501 exit3:
46502 mnt_drop_write(nd.path.mnt);
46503 exit2:
46504@@ -2947,10 +3114,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
46505 error = mnt_want_write(path.mnt);
46506 if (error)
46507 goto out_dput;
46508+
46509+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
46510+ error = -EACCES;
46511+ goto out_drop_write;
46512+ }
46513+
46514 error = security_path_symlink(&path, dentry, from);
46515 if (error)
46516 goto out_drop_write;
46517 error = vfs_symlink(path.dentry->d_inode, dentry, from);
46518+ if (!error)
46519+ gr_handle_create(dentry, path.mnt);
46520 out_drop_write:
46521 mnt_drop_write(path.mnt);
46522 out_dput:
46523@@ -3025,6 +3200,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46524 {
46525 struct dentry *new_dentry;
46526 struct path old_path, new_path;
46527+ char *to = NULL;
46528 int how = 0;
46529 int error;
46530
46531@@ -3048,7 +3224,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46532 if (error)
46533 return error;
46534
46535- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
46536+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
46537 error = PTR_ERR(new_dentry);
46538 if (IS_ERR(new_dentry))
46539 goto out;
46540@@ -3059,13 +3235,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46541 error = mnt_want_write(new_path.mnt);
46542 if (error)
46543 goto out_dput;
46544+
46545+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
46546+ old_path.dentry->d_inode,
46547+ old_path.dentry->d_inode->i_mode, to)) {
46548+ error = -EACCES;
46549+ goto out_drop_write;
46550+ }
46551+
46552+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
46553+ old_path.dentry, old_path.mnt, to)) {
46554+ error = -EACCES;
46555+ goto out_drop_write;
46556+ }
46557+
46558 error = security_path_link(old_path.dentry, &new_path, new_dentry);
46559 if (error)
46560 goto out_drop_write;
46561 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
46562+ if (!error)
46563+ gr_handle_create(new_dentry, new_path.mnt);
46564 out_drop_write:
46565 mnt_drop_write(new_path.mnt);
46566 out_dput:
46567+ putname(to);
46568 dput(new_dentry);
46569 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
46570 path_put(&new_path);
46571@@ -3299,6 +3492,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46572 if (new_dentry == trap)
46573 goto exit5;
46574
46575+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
46576+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
46577+ to);
46578+ if (error)
46579+ goto exit5;
46580+
46581 error = mnt_want_write(oldnd.path.mnt);
46582 if (error)
46583 goto exit5;
46584@@ -3308,6 +3507,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46585 goto exit6;
46586 error = vfs_rename(old_dir->d_inode, old_dentry,
46587 new_dir->d_inode, new_dentry);
46588+ if (!error)
46589+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
46590+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
46591 exit6:
46592 mnt_drop_write(oldnd.path.mnt);
46593 exit5:
46594@@ -3333,6 +3535,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
46595
46596 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
46597 {
46598+ char tmpbuf[64];
46599+ const char *newlink;
46600 int len;
46601
46602 len = PTR_ERR(link);
46603@@ -3342,7 +3546,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
46604 len = strlen(link);
46605 if (len > (unsigned) buflen)
46606 len = buflen;
46607- if (copy_to_user(buffer, link, len))
46608+
46609+ if (len < sizeof(tmpbuf)) {
46610+ memcpy(tmpbuf, link, len);
46611+ newlink = tmpbuf;
46612+ } else
46613+ newlink = link;
46614+
46615+ if (copy_to_user(buffer, newlink, len))
46616 len = -EFAULT;
46617 out:
46618 return len;
46619diff --git a/fs/namespace.c b/fs/namespace.c
46620index 4e46539..b28253c 100644
46621--- a/fs/namespace.c
46622+++ b/fs/namespace.c
46623@@ -1156,6 +1156,9 @@ static int do_umount(struct mount *mnt, int flags)
46624 if (!(sb->s_flags & MS_RDONLY))
46625 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
46626 up_write(&sb->s_umount);
46627+
46628+ gr_log_remount(mnt->mnt_devname, retval);
46629+
46630 return retval;
46631 }
46632
46633@@ -1175,6 +1178,9 @@ static int do_umount(struct mount *mnt, int flags)
46634 br_write_unlock(vfsmount_lock);
46635 up_write(&namespace_sem);
46636 release_mounts(&umount_list);
46637+
46638+ gr_log_unmount(mnt->mnt_devname, retval);
46639+
46640 return retval;
46641 }
46642
46643@@ -2176,6 +2182,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
46644 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
46645 MS_STRICTATIME);
46646
46647+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
46648+ retval = -EPERM;
46649+ goto dput_out;
46650+ }
46651+
46652+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
46653+ retval = -EPERM;
46654+ goto dput_out;
46655+ }
46656+
46657 if (flags & MS_REMOUNT)
46658 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
46659 data_page);
46660@@ -2190,6 +2206,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
46661 dev_name, data_page);
46662 dput_out:
46663 path_put(&path);
46664+
46665+ gr_log_mount(dev_name, dir_name, retval);
46666+
46667 return retval;
46668 }
46669
46670@@ -2471,6 +2490,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
46671 if (error)
46672 goto out2;
46673
46674+ if (gr_handle_chroot_pivot()) {
46675+ error = -EPERM;
46676+ goto out2;
46677+ }
46678+
46679 get_fs_root(current->fs, &root);
46680 error = lock_mount(&old);
46681 if (error)
46682diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
46683index e8bbfa5..864f936 100644
46684--- a/fs/nfs/inode.c
46685+++ b/fs/nfs/inode.c
46686@@ -152,7 +152,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
46687 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
46688 nfsi->attrtimeo_timestamp = jiffies;
46689
46690- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
46691+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
46692 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
46693 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
46694 else
46695@@ -1005,16 +1005,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
46696 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
46697 }
46698
46699-static atomic_long_t nfs_attr_generation_counter;
46700+static atomic_long_unchecked_t nfs_attr_generation_counter;
46701
46702 static unsigned long nfs_read_attr_generation_counter(void)
46703 {
46704- return atomic_long_read(&nfs_attr_generation_counter);
46705+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
46706 }
46707
46708 unsigned long nfs_inc_attr_generation_counter(void)
46709 {
46710- return atomic_long_inc_return(&nfs_attr_generation_counter);
46711+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
46712 }
46713
46714 void nfs_fattr_init(struct nfs_fattr *fattr)
46715diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
46716index 5686661..80a9a3a 100644
46717--- a/fs/nfsd/vfs.c
46718+++ b/fs/nfsd/vfs.c
46719@@ -933,7 +933,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
46720 } else {
46721 oldfs = get_fs();
46722 set_fs(KERNEL_DS);
46723- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
46724+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
46725 set_fs(oldfs);
46726 }
46727
46728@@ -1037,7 +1037,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
46729
46730 /* Write the data. */
46731 oldfs = get_fs(); set_fs(KERNEL_DS);
46732- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
46733+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
46734 set_fs(oldfs);
46735 if (host_err < 0)
46736 goto out_nfserr;
46737@@ -1573,7 +1573,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
46738 */
46739
46740 oldfs = get_fs(); set_fs(KERNEL_DS);
46741- host_err = inode->i_op->readlink(path.dentry, buf, *lenp);
46742+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
46743 set_fs(oldfs);
46744
46745 if (host_err < 0)
46746diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
46747index 3568c8a..e0240d8 100644
46748--- a/fs/notify/fanotify/fanotify_user.c
46749+++ b/fs/notify/fanotify/fanotify_user.c
46750@@ -278,7 +278,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
46751 goto out_close_fd;
46752
46753 ret = -EFAULT;
46754- if (copy_to_user(buf, &fanotify_event_metadata,
46755+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
46756+ copy_to_user(buf, &fanotify_event_metadata,
46757 fanotify_event_metadata.event_len))
46758 goto out_kill_access_response;
46759
46760diff --git a/fs/notify/notification.c b/fs/notify/notification.c
46761index c887b13..0fdf472 100644
46762--- a/fs/notify/notification.c
46763+++ b/fs/notify/notification.c
46764@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
46765 * get set to 0 so it will never get 'freed'
46766 */
46767 static struct fsnotify_event *q_overflow_event;
46768-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46769+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46770
46771 /**
46772 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
46773@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46774 */
46775 u32 fsnotify_get_cookie(void)
46776 {
46777- return atomic_inc_return(&fsnotify_sync_cookie);
46778+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
46779 }
46780 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
46781
46782diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
46783index 99e3610..02c1068 100644
46784--- a/fs/ntfs/dir.c
46785+++ b/fs/ntfs/dir.c
46786@@ -1329,7 +1329,7 @@ find_next_index_buffer:
46787 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
46788 ~(s64)(ndir->itype.index.block_size - 1)));
46789 /* Bounds checks. */
46790- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
46791+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
46792 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
46793 "inode 0x%lx or driver bug.", vdir->i_ino);
46794 goto err_out;
46795diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
46796index 8639169..76697aa 100644
46797--- a/fs/ntfs/file.c
46798+++ b/fs/ntfs/file.c
46799@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
46800 #endif /* NTFS_RW */
46801 };
46802
46803-const struct file_operations ntfs_empty_file_ops = {};
46804+const struct file_operations ntfs_empty_file_ops __read_only;
46805
46806-const struct inode_operations ntfs_empty_inode_ops = {};
46807+const struct inode_operations ntfs_empty_inode_ops __read_only;
46808diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
46809index 210c352..a174f83 100644
46810--- a/fs/ocfs2/localalloc.c
46811+++ b/fs/ocfs2/localalloc.c
46812@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
46813 goto bail;
46814 }
46815
46816- atomic_inc(&osb->alloc_stats.moves);
46817+ atomic_inc_unchecked(&osb->alloc_stats.moves);
46818
46819 bail:
46820 if (handle)
46821diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
46822index d355e6e..578d905 100644
46823--- a/fs/ocfs2/ocfs2.h
46824+++ b/fs/ocfs2/ocfs2.h
46825@@ -235,11 +235,11 @@ enum ocfs2_vol_state
46826
46827 struct ocfs2_alloc_stats
46828 {
46829- atomic_t moves;
46830- atomic_t local_data;
46831- atomic_t bitmap_data;
46832- atomic_t bg_allocs;
46833- atomic_t bg_extends;
46834+ atomic_unchecked_t moves;
46835+ atomic_unchecked_t local_data;
46836+ atomic_unchecked_t bitmap_data;
46837+ atomic_unchecked_t bg_allocs;
46838+ atomic_unchecked_t bg_extends;
46839 };
46840
46841 enum ocfs2_local_alloc_state
46842diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
46843index f169da4..9112253 100644
46844--- a/fs/ocfs2/suballoc.c
46845+++ b/fs/ocfs2/suballoc.c
46846@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
46847 mlog_errno(status);
46848 goto bail;
46849 }
46850- atomic_inc(&osb->alloc_stats.bg_extends);
46851+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
46852
46853 /* You should never ask for this much metadata */
46854 BUG_ON(bits_wanted >
46855@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
46856 mlog_errno(status);
46857 goto bail;
46858 }
46859- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46860+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46861
46862 *suballoc_loc = res.sr_bg_blkno;
46863 *suballoc_bit_start = res.sr_bit_offset;
46864@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
46865 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
46866 res->sr_bits);
46867
46868- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46869+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46870
46871 BUG_ON(res->sr_bits != 1);
46872
46873@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
46874 mlog_errno(status);
46875 goto bail;
46876 }
46877- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46878+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46879
46880 BUG_ON(res.sr_bits != 1);
46881
46882@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
46883 cluster_start,
46884 num_clusters);
46885 if (!status)
46886- atomic_inc(&osb->alloc_stats.local_data);
46887+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
46888 } else {
46889 if (min_clusters > (osb->bitmap_cpg - 1)) {
46890 /* The only paths asking for contiguousness
46891@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
46892 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
46893 res.sr_bg_blkno,
46894 res.sr_bit_offset);
46895- atomic_inc(&osb->alloc_stats.bitmap_data);
46896+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
46897 *num_clusters = res.sr_bits;
46898 }
46899 }
46900diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
46901index 68f4541..89cfe6a 100644
46902--- a/fs/ocfs2/super.c
46903+++ b/fs/ocfs2/super.c
46904@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
46905 "%10s => GlobalAllocs: %d LocalAllocs: %d "
46906 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
46907 "Stats",
46908- atomic_read(&osb->alloc_stats.bitmap_data),
46909- atomic_read(&osb->alloc_stats.local_data),
46910- atomic_read(&osb->alloc_stats.bg_allocs),
46911- atomic_read(&osb->alloc_stats.moves),
46912- atomic_read(&osb->alloc_stats.bg_extends));
46913+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
46914+ atomic_read_unchecked(&osb->alloc_stats.local_data),
46915+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
46916+ atomic_read_unchecked(&osb->alloc_stats.moves),
46917+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
46918
46919 out += snprintf(buf + out, len - out,
46920 "%10s => State: %u Descriptor: %llu Size: %u bits "
46921@@ -2116,11 +2116,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
46922 spin_lock_init(&osb->osb_xattr_lock);
46923 ocfs2_init_steal_slots(osb);
46924
46925- atomic_set(&osb->alloc_stats.moves, 0);
46926- atomic_set(&osb->alloc_stats.local_data, 0);
46927- atomic_set(&osb->alloc_stats.bitmap_data, 0);
46928- atomic_set(&osb->alloc_stats.bg_allocs, 0);
46929- atomic_set(&osb->alloc_stats.bg_extends, 0);
46930+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
46931+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
46932+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
46933+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
46934+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
46935
46936 /* Copy the blockcheck stats from the superblock probe */
46937 osb->osb_ecc_stats = *stats;
46938diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
46939index 5d22872..523db20 100644
46940--- a/fs/ocfs2/symlink.c
46941+++ b/fs/ocfs2/symlink.c
46942@@ -142,7 +142,7 @@ bail:
46943
46944 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46945 {
46946- char *link = nd_get_link(nd);
46947+ const char *link = nd_get_link(nd);
46948 if (!IS_ERR(link))
46949 kfree(link);
46950 }
46951diff --git a/fs/open.c b/fs/open.c
46952index 5720854..ccfe124 100644
46953--- a/fs/open.c
46954+++ b/fs/open.c
46955@@ -31,6 +31,8 @@
46956 #include <linux/ima.h>
46957 #include <linux/dnotify.h>
46958
46959+#define CREATE_TRACE_POINTS
46960+#include <trace/events/fs.h>
46961 #include "internal.h"
46962
46963 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
46964@@ -112,6 +114,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
46965 error = locks_verify_truncate(inode, NULL, length);
46966 if (!error)
46967 error = security_path_truncate(&path);
46968+
46969+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
46970+ error = -EACCES;
46971+
46972 if (!error)
46973 error = do_truncate(path.dentry, length, 0, NULL);
46974
46975@@ -358,6 +364,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
46976 if (__mnt_is_readonly(path.mnt))
46977 res = -EROFS;
46978
46979+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
46980+ res = -EACCES;
46981+
46982 out_path_release:
46983 path_put(&path);
46984 out:
46985@@ -384,6 +393,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
46986 if (error)
46987 goto dput_and_out;
46988
46989+ gr_log_chdir(path.dentry, path.mnt);
46990+
46991 set_fs_pwd(current->fs, &path);
46992
46993 dput_and_out:
46994@@ -410,6 +421,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
46995 goto out_putf;
46996
46997 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
46998+
46999+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
47000+ error = -EPERM;
47001+
47002+ if (!error)
47003+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
47004+
47005 if (!error)
47006 set_fs_pwd(current->fs, &file->f_path);
47007 out_putf:
47008@@ -438,7 +456,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
47009 if (error)
47010 goto dput_and_out;
47011
47012+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
47013+ goto dput_and_out;
47014+
47015 set_fs_root(current->fs, &path);
47016+
47017+ gr_handle_chroot_chdir(&path);
47018+
47019 error = 0;
47020 dput_and_out:
47021 path_put(&path);
47022@@ -456,6 +480,16 @@ static int chmod_common(struct path *path, umode_t mode)
47023 if (error)
47024 return error;
47025 mutex_lock(&inode->i_mutex);
47026+
47027+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
47028+ error = -EACCES;
47029+ goto out_unlock;
47030+ }
47031+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
47032+ error = -EACCES;
47033+ goto out_unlock;
47034+ }
47035+
47036 error = security_path_chmod(path, mode);
47037 if (error)
47038 goto out_unlock;
47039@@ -506,6 +540,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
47040 int error;
47041 struct iattr newattrs;
47042
47043+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
47044+ return -EACCES;
47045+
47046 newattrs.ia_valid = ATTR_CTIME;
47047 if (user != (uid_t) -1) {
47048 newattrs.ia_valid |= ATTR_UID;
47049@@ -987,6 +1024,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
47050 } else {
47051 fsnotify_open(f);
47052 fd_install(fd, f);
47053+ trace_do_sys_open(tmp, flags, mode);
47054 }
47055 }
47056 putname(tmp);
47057diff --git a/fs/pipe.c b/fs/pipe.c
47058index fec5e4a..f4210f9 100644
47059--- a/fs/pipe.c
47060+++ b/fs/pipe.c
47061@@ -438,9 +438,9 @@ redo:
47062 }
47063 if (bufs) /* More to do? */
47064 continue;
47065- if (!pipe->writers)
47066+ if (!atomic_read(&pipe->writers))
47067 break;
47068- if (!pipe->waiting_writers) {
47069+ if (!atomic_read(&pipe->waiting_writers)) {
47070 /* syscall merging: Usually we must not sleep
47071 * if O_NONBLOCK is set, or if we got some data.
47072 * But if a writer sleeps in kernel space, then
47073@@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
47074 mutex_lock(&inode->i_mutex);
47075 pipe = inode->i_pipe;
47076
47077- if (!pipe->readers) {
47078+ if (!atomic_read(&pipe->readers)) {
47079 send_sig(SIGPIPE, current, 0);
47080 ret = -EPIPE;
47081 goto out;
47082@@ -553,7 +553,7 @@ redo1:
47083 for (;;) {
47084 int bufs;
47085
47086- if (!pipe->readers) {
47087+ if (!atomic_read(&pipe->readers)) {
47088 send_sig(SIGPIPE, current, 0);
47089 if (!ret)
47090 ret = -EPIPE;
47091@@ -644,9 +644,9 @@ redo2:
47092 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
47093 do_wakeup = 0;
47094 }
47095- pipe->waiting_writers++;
47096+ atomic_inc(&pipe->waiting_writers);
47097 pipe_wait(pipe);
47098- pipe->waiting_writers--;
47099+ atomic_dec(&pipe->waiting_writers);
47100 }
47101 out:
47102 mutex_unlock(&inode->i_mutex);
47103@@ -713,7 +713,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47104 mask = 0;
47105 if (filp->f_mode & FMODE_READ) {
47106 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
47107- if (!pipe->writers && filp->f_version != pipe->w_counter)
47108+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
47109 mask |= POLLHUP;
47110 }
47111
47112@@ -723,7 +723,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47113 * Most Unices do not set POLLERR for FIFOs but on Linux they
47114 * behave exactly like pipes for poll().
47115 */
47116- if (!pipe->readers)
47117+ if (!atomic_read(&pipe->readers))
47118 mask |= POLLERR;
47119 }
47120
47121@@ -737,10 +737,10 @@ pipe_release(struct inode *inode, int decr, int decw)
47122
47123 mutex_lock(&inode->i_mutex);
47124 pipe = inode->i_pipe;
47125- pipe->readers -= decr;
47126- pipe->writers -= decw;
47127+ atomic_sub(decr, &pipe->readers);
47128+ atomic_sub(decw, &pipe->writers);
47129
47130- if (!pipe->readers && !pipe->writers) {
47131+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
47132 free_pipe_info(inode);
47133 } else {
47134 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
47135@@ -830,7 +830,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
47136
47137 if (inode->i_pipe) {
47138 ret = 0;
47139- inode->i_pipe->readers++;
47140+ atomic_inc(&inode->i_pipe->readers);
47141 }
47142
47143 mutex_unlock(&inode->i_mutex);
47144@@ -847,7 +847,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
47145
47146 if (inode->i_pipe) {
47147 ret = 0;
47148- inode->i_pipe->writers++;
47149+ atomic_inc(&inode->i_pipe->writers);
47150 }
47151
47152 mutex_unlock(&inode->i_mutex);
47153@@ -865,9 +865,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
47154 if (inode->i_pipe) {
47155 ret = 0;
47156 if (filp->f_mode & FMODE_READ)
47157- inode->i_pipe->readers++;
47158+ atomic_inc(&inode->i_pipe->readers);
47159 if (filp->f_mode & FMODE_WRITE)
47160- inode->i_pipe->writers++;
47161+ atomic_inc(&inode->i_pipe->writers);
47162 }
47163
47164 mutex_unlock(&inode->i_mutex);
47165@@ -959,7 +959,7 @@ void free_pipe_info(struct inode *inode)
47166 inode->i_pipe = NULL;
47167 }
47168
47169-static struct vfsmount *pipe_mnt __read_mostly;
47170+struct vfsmount *pipe_mnt __read_mostly;
47171
47172 /*
47173 * pipefs_dname() is called from d_path().
47174@@ -989,7 +989,8 @@ static struct inode * get_pipe_inode(void)
47175 goto fail_iput;
47176 inode->i_pipe = pipe;
47177
47178- pipe->readers = pipe->writers = 1;
47179+ atomic_set(&pipe->readers, 1);
47180+ atomic_set(&pipe->writers, 1);
47181 inode->i_fop = &rdwr_pipefifo_fops;
47182
47183 /*
47184diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
47185index 15af622..0e9f4467 100644
47186--- a/fs/proc/Kconfig
47187+++ b/fs/proc/Kconfig
47188@@ -30,12 +30,12 @@ config PROC_FS
47189
47190 config PROC_KCORE
47191 bool "/proc/kcore support" if !ARM
47192- depends on PROC_FS && MMU
47193+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
47194
47195 config PROC_VMCORE
47196 bool "/proc/vmcore support"
47197- depends on PROC_FS && CRASH_DUMP
47198- default y
47199+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
47200+ default n
47201 help
47202 Exports the dump image of crashed kernel in ELF format.
47203
47204@@ -59,8 +59,8 @@ config PROC_SYSCTL
47205 limited in memory.
47206
47207 config PROC_PAGE_MONITOR
47208- default y
47209- depends on PROC_FS && MMU
47210+ default n
47211+ depends on PROC_FS && MMU && !GRKERNSEC
47212 bool "Enable /proc page monitoring" if EXPERT
47213 help
47214 Various /proc files exist to monitor process memory utilization:
47215diff --git a/fs/proc/array.c b/fs/proc/array.c
47216index f9bd395..acb7847 100644
47217--- a/fs/proc/array.c
47218+++ b/fs/proc/array.c
47219@@ -60,6 +60,7 @@
47220 #include <linux/tty.h>
47221 #include <linux/string.h>
47222 #include <linux/mman.h>
47223+#include <linux/grsecurity.h>
47224 #include <linux/proc_fs.h>
47225 #include <linux/ioport.h>
47226 #include <linux/uaccess.h>
47227@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
47228 seq_putc(m, '\n');
47229 }
47230
47231+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47232+static inline void task_pax(struct seq_file *m, struct task_struct *p)
47233+{
47234+ if (p->mm)
47235+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
47236+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
47237+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
47238+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
47239+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
47240+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
47241+ else
47242+ seq_printf(m, "PaX:\t-----\n");
47243+}
47244+#endif
47245+
47246 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47247 struct pid *pid, struct task_struct *task)
47248 {
47249@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47250 task_cpus_allowed(m, task);
47251 cpuset_task_status_allowed(m, task);
47252 task_context_switch_counts(m, task);
47253+
47254+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47255+ task_pax(m, task);
47256+#endif
47257+
47258+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
47259+ task_grsec_rbac(m, task);
47260+#endif
47261+
47262 return 0;
47263 }
47264
47265+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47266+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47267+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
47268+ _mm->pax_flags & MF_PAX_SEGMEXEC))
47269+#endif
47270+
47271 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47272 struct pid *pid, struct task_struct *task, int whole)
47273 {
47274@@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47275 char tcomm[sizeof(task->comm)];
47276 unsigned long flags;
47277
47278+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47279+ if (current->exec_id != m->exec_id) {
47280+ gr_log_badprocpid("stat");
47281+ return 0;
47282+ }
47283+#endif
47284+
47285 state = *get_task_state(task);
47286 vsize = eip = esp = 0;
47287 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
47288@@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47289 gtime = task->gtime;
47290 }
47291
47292+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47293+ if (PAX_RAND_FLAGS(mm)) {
47294+ eip = 0;
47295+ esp = 0;
47296+ wchan = 0;
47297+ }
47298+#endif
47299+#ifdef CONFIG_GRKERNSEC_HIDESYM
47300+ wchan = 0;
47301+ eip =0;
47302+ esp =0;
47303+#endif
47304+
47305 /* scale priority and nice values from timeslices to -20..20 */
47306 /* to make it look like a "normal" Unix priority/nice value */
47307 priority = task_prio(task);
47308@@ -485,9 +536,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47309 seq_put_decimal_ull(m, ' ', vsize);
47310 seq_put_decimal_ll(m, ' ', mm ? get_mm_rss(mm) : 0);
47311 seq_put_decimal_ull(m, ' ', rsslim);
47312+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47313+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
47314+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
47315+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
47316+#else
47317 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
47318 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
47319 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
47320+#endif
47321 seq_put_decimal_ull(m, ' ', esp);
47322 seq_put_decimal_ull(m, ' ', eip);
47323 /* The signal information here is obsolete.
47324@@ -508,9 +565,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47325 seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task));
47326 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
47327 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
47328+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47329+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->start_data : 0));
47330+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->end_data : 0));
47331+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->start_brk : 0));
47332+#else
47333 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_data : 0);
47334 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->end_data : 0);
47335 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_brk : 0);
47336+#endif
47337 seq_putc(m, '\n');
47338 if (mm)
47339 mmput(mm);
47340@@ -533,8 +596,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47341 struct pid *pid, struct task_struct *task)
47342 {
47343 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
47344- struct mm_struct *mm = get_task_mm(task);
47345+ struct mm_struct *mm;
47346
47347+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47348+ if (current->exec_id != m->exec_id) {
47349+ gr_log_badprocpid("statm");
47350+ return 0;
47351+ }
47352+#endif
47353+ mm = get_task_mm(task);
47354 if (mm) {
47355 size = task_statm(mm, &shared, &text, &data, &resident);
47356 mmput(mm);
47357@@ -556,3 +626,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47358
47359 return 0;
47360 }
47361+
47362+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47363+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
47364+{
47365+ u32 curr_ip = 0;
47366+ unsigned long flags;
47367+
47368+ if (lock_task_sighand(task, &flags)) {
47369+ curr_ip = task->signal->curr_ip;
47370+ unlock_task_sighand(task, &flags);
47371+ }
47372+
47373+ return sprintf(buffer, "%pI4\n", &curr_ip);
47374+}
47375+#endif
47376diff --git a/fs/proc/base.c b/fs/proc/base.c
47377index 9fc77b4..04761b8 100644
47378--- a/fs/proc/base.c
47379+++ b/fs/proc/base.c
47380@@ -109,6 +109,14 @@ struct pid_entry {
47381 union proc_op op;
47382 };
47383
47384+struct getdents_callback {
47385+ struct linux_dirent __user * current_dir;
47386+ struct linux_dirent __user * previous;
47387+ struct file * file;
47388+ int count;
47389+ int error;
47390+};
47391+
47392 #define NOD(NAME, MODE, IOP, FOP, OP) { \
47393 .name = (NAME), \
47394 .len = sizeof(NAME) - 1, \
47395@@ -213,6 +221,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
47396 if (!mm->arg_end)
47397 goto out_mm; /* Shh! No looking before we're done */
47398
47399+ if (gr_acl_handle_procpidmem(task))
47400+ goto out_mm;
47401+
47402 len = mm->arg_end - mm->arg_start;
47403
47404 if (len > PAGE_SIZE)
47405@@ -240,12 +251,28 @@ out:
47406 return res;
47407 }
47408
47409+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47410+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47411+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
47412+ _mm->pax_flags & MF_PAX_SEGMEXEC))
47413+#endif
47414+
47415 static int proc_pid_auxv(struct task_struct *task, char *buffer)
47416 {
47417 struct mm_struct *mm = mm_for_maps(task);
47418 int res = PTR_ERR(mm);
47419 if (mm && !IS_ERR(mm)) {
47420 unsigned int nwords = 0;
47421+
47422+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47423+ /* allow if we're currently ptracing this task */
47424+ if (PAX_RAND_FLAGS(mm) &&
47425+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
47426+ mmput(mm);
47427+ return 0;
47428+ }
47429+#endif
47430+
47431 do {
47432 nwords += 2;
47433 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
47434@@ -259,7 +286,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
47435 }
47436
47437
47438-#ifdef CONFIG_KALLSYMS
47439+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47440 /*
47441 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
47442 * Returns the resolved symbol. If that fails, simply return the address.
47443@@ -298,7 +325,7 @@ static void unlock_trace(struct task_struct *task)
47444 mutex_unlock(&task->signal->cred_guard_mutex);
47445 }
47446
47447-#ifdef CONFIG_STACKTRACE
47448+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47449
47450 #define MAX_STACK_TRACE_DEPTH 64
47451
47452@@ -489,7 +516,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
47453 return count;
47454 }
47455
47456-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47457+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47458 static int proc_pid_syscall(struct task_struct *task, char *buffer)
47459 {
47460 long nr;
47461@@ -518,7 +545,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
47462 /************************************************************************/
47463
47464 /* permission checks */
47465-static int proc_fd_access_allowed(struct inode *inode)
47466+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
47467 {
47468 struct task_struct *task;
47469 int allowed = 0;
47470@@ -528,7 +555,10 @@ static int proc_fd_access_allowed(struct inode *inode)
47471 */
47472 task = get_proc_task(inode);
47473 if (task) {
47474- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47475+ if (log)
47476+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47477+ else
47478+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
47479 put_task_struct(task);
47480 }
47481 return allowed;
47482@@ -566,10 +596,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
47483 struct task_struct *task,
47484 int hide_pid_min)
47485 {
47486+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47487+ return false;
47488+
47489+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47490+ rcu_read_lock();
47491+ {
47492+ const struct cred *tmpcred = current_cred();
47493+ const struct cred *cred = __task_cred(task);
47494+
47495+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
47496+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47497+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
47498+#endif
47499+ ) {
47500+ rcu_read_unlock();
47501+ return true;
47502+ }
47503+ }
47504+ rcu_read_unlock();
47505+
47506+ if (!pid->hide_pid)
47507+ return false;
47508+#endif
47509+
47510 if (pid->hide_pid < hide_pid_min)
47511 return true;
47512 if (in_group_p(pid->pid_gid))
47513 return true;
47514+
47515 return ptrace_may_access(task, PTRACE_MODE_READ);
47516 }
47517
47518@@ -587,7 +642,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
47519 put_task_struct(task);
47520
47521 if (!has_perms) {
47522+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47523+ {
47524+#else
47525 if (pid->hide_pid == 2) {
47526+#endif
47527 /*
47528 * Let's make getdents(), stat(), and open()
47529 * consistent with each other. If a process
47530@@ -702,6 +761,10 @@ static int mem_open(struct inode* inode, struct file* file)
47531 file->f_mode |= FMODE_UNSIGNED_OFFSET;
47532 file->private_data = mm;
47533
47534+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47535+ file->f_version = current->exec_id;
47536+#endif
47537+
47538 return 0;
47539 }
47540
47541@@ -713,6 +776,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
47542 ssize_t copied;
47543 char *page;
47544
47545+#ifdef CONFIG_GRKERNSEC
47546+ if (write)
47547+ return -EPERM;
47548+#endif
47549+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47550+ if (file->f_version != current->exec_id) {
47551+ gr_log_badprocpid("mem");
47552+ return 0;
47553+ }
47554+#endif
47555+
47556 if (!mm)
47557 return 0;
47558
47559@@ -813,6 +887,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
47560 if (!task)
47561 goto out_no_task;
47562
47563+ if (gr_acl_handle_procpidmem(task))
47564+ goto out;
47565+
47566 ret = -ENOMEM;
47567 page = (char *)__get_free_page(GFP_TEMPORARY);
47568 if (!page)
47569@@ -1433,7 +1510,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
47570 path_put(&nd->path);
47571
47572 /* Are we allowed to snoop on the tasks file descriptors? */
47573- if (!proc_fd_access_allowed(inode))
47574+ if (!proc_fd_access_allowed(inode, 0))
47575 goto out;
47576
47577 error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
47578@@ -1472,8 +1549,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
47579 struct path path;
47580
47581 /* Are we allowed to snoop on the tasks file descriptors? */
47582- if (!proc_fd_access_allowed(inode))
47583- goto out;
47584+ /* logging this is needed for learning on chromium to work properly,
47585+ but we don't want to flood the logs from 'ps' which does a readlink
47586+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
47587+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
47588+ */
47589+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
47590+ if (!proc_fd_access_allowed(inode,0))
47591+ goto out;
47592+ } else {
47593+ if (!proc_fd_access_allowed(inode,1))
47594+ goto out;
47595+ }
47596
47597 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
47598 if (error)
47599@@ -1538,7 +1625,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
47600 rcu_read_lock();
47601 cred = __task_cred(task);
47602 inode->i_uid = cred->euid;
47603+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47604+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47605+#else
47606 inode->i_gid = cred->egid;
47607+#endif
47608 rcu_read_unlock();
47609 }
47610 security_task_to_inode(task, inode);
47611@@ -1574,10 +1665,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
47612 return -ENOENT;
47613 }
47614 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47615+#ifdef CONFIG_GRKERNSEC_PROC_USER
47616+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47617+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47618+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47619+#endif
47620 task_dumpable(task)) {
47621 cred = __task_cred(task);
47622 stat->uid = cred->euid;
47623+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47624+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
47625+#else
47626 stat->gid = cred->egid;
47627+#endif
47628 }
47629 }
47630 rcu_read_unlock();
47631@@ -1615,11 +1715,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
47632
47633 if (task) {
47634 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47635+#ifdef CONFIG_GRKERNSEC_PROC_USER
47636+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47637+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47638+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47639+#endif
47640 task_dumpable(task)) {
47641 rcu_read_lock();
47642 cred = __task_cred(task);
47643 inode->i_uid = cred->euid;
47644+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47645+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47646+#else
47647 inode->i_gid = cred->egid;
47648+#endif
47649 rcu_read_unlock();
47650 } else {
47651 inode->i_uid = 0;
47652@@ -1737,7 +1846,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
47653 int fd = proc_fd(inode);
47654
47655 if (task) {
47656- files = get_files_struct(task);
47657+ if (!gr_acl_handle_procpidmem(task))
47658+ files = get_files_struct(task);
47659 put_task_struct(task);
47660 }
47661 if (files) {
47662@@ -2338,11 +2448,21 @@ static const struct file_operations proc_map_files_operations = {
47663 */
47664 static int proc_fd_permission(struct inode *inode, int mask)
47665 {
47666+ struct task_struct *task;
47667 int rv = generic_permission(inode, mask);
47668- if (rv == 0)
47669- return 0;
47670+
47671 if (task_pid(current) == proc_pid(inode))
47672 rv = 0;
47673+
47674+ task = get_proc_task(inode);
47675+ if (task == NULL)
47676+ return rv;
47677+
47678+ if (gr_acl_handle_procpidmem(task))
47679+ rv = -EACCES;
47680+
47681+ put_task_struct(task);
47682+
47683 return rv;
47684 }
47685
47686@@ -2452,6 +2572,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
47687 if (!task)
47688 goto out_no_task;
47689
47690+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47691+ goto out;
47692+
47693 /*
47694 * Yes, it does not scale. And it should not. Don't add
47695 * new entries into /proc/<tgid>/ without very good reasons.
47696@@ -2496,6 +2619,9 @@ static int proc_pident_readdir(struct file *filp,
47697 if (!task)
47698 goto out_no_task;
47699
47700+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47701+ goto out;
47702+
47703 ret = 0;
47704 i = filp->f_pos;
47705 switch (i) {
47706@@ -2766,7 +2892,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
47707 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
47708 void *cookie)
47709 {
47710- char *s = nd_get_link(nd);
47711+ const char *s = nd_get_link(nd);
47712 if (!IS_ERR(s))
47713 __putname(s);
47714 }
47715@@ -2967,7 +3093,7 @@ static const struct pid_entry tgid_base_stuff[] = {
47716 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
47717 #endif
47718 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
47719-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47720+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47721 INF("syscall", S_IRUGO, proc_pid_syscall),
47722 #endif
47723 INF("cmdline", S_IRUGO, proc_pid_cmdline),
47724@@ -2992,10 +3118,10 @@ static const struct pid_entry tgid_base_stuff[] = {
47725 #ifdef CONFIG_SECURITY
47726 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
47727 #endif
47728-#ifdef CONFIG_KALLSYMS
47729+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47730 INF("wchan", S_IRUGO, proc_pid_wchan),
47731 #endif
47732-#ifdef CONFIG_STACKTRACE
47733+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47734 ONE("stack", S_IRUGO, proc_pid_stack),
47735 #endif
47736 #ifdef CONFIG_SCHEDSTATS
47737@@ -3029,6 +3155,9 @@ static const struct pid_entry tgid_base_stuff[] = {
47738 #ifdef CONFIG_HARDWALL
47739 INF("hardwall", S_IRUGO, proc_pid_hardwall),
47740 #endif
47741+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47742+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
47743+#endif
47744 };
47745
47746 static int proc_tgid_base_readdir(struct file * filp,
47747@@ -3155,7 +3284,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
47748 if (!inode)
47749 goto out;
47750
47751+#ifdef CONFIG_GRKERNSEC_PROC_USER
47752+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
47753+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47754+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47755+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
47756+#else
47757 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
47758+#endif
47759 inode->i_op = &proc_tgid_base_inode_operations;
47760 inode->i_fop = &proc_tgid_base_operations;
47761 inode->i_flags|=S_IMMUTABLE;
47762@@ -3197,7 +3333,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
47763 if (!task)
47764 goto out;
47765
47766+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47767+ goto out_put_task;
47768+
47769 result = proc_pid_instantiate(dir, dentry, task, NULL);
47770+out_put_task:
47771 put_task_struct(task);
47772 out:
47773 return result;
47774@@ -3260,6 +3400,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
47775 static int fake_filldir(void *buf, const char *name, int namelen,
47776 loff_t offset, u64 ino, unsigned d_type)
47777 {
47778+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
47779+ __buf->error = -EINVAL;
47780 return 0;
47781 }
47782
47783@@ -3326,7 +3468,7 @@ static const struct pid_entry tid_base_stuff[] = {
47784 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
47785 #endif
47786 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
47787-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47788+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47789 INF("syscall", S_IRUGO, proc_pid_syscall),
47790 #endif
47791 INF("cmdline", S_IRUGO, proc_pid_cmdline),
47792@@ -3350,10 +3492,10 @@ static const struct pid_entry tid_base_stuff[] = {
47793 #ifdef CONFIG_SECURITY
47794 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
47795 #endif
47796-#ifdef CONFIG_KALLSYMS
47797+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47798 INF("wchan", S_IRUGO, proc_pid_wchan),
47799 #endif
47800-#ifdef CONFIG_STACKTRACE
47801+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47802 ONE("stack", S_IRUGO, proc_pid_stack),
47803 #endif
47804 #ifdef CONFIG_SCHEDSTATS
47805diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
47806index 82676e3..5f8518a 100644
47807--- a/fs/proc/cmdline.c
47808+++ b/fs/proc/cmdline.c
47809@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
47810
47811 static int __init proc_cmdline_init(void)
47812 {
47813+#ifdef CONFIG_GRKERNSEC_PROC_ADD
47814+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
47815+#else
47816 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
47817+#endif
47818 return 0;
47819 }
47820 module_init(proc_cmdline_init);
47821diff --git a/fs/proc/devices.c b/fs/proc/devices.c
47822index b143471..bb105e5 100644
47823--- a/fs/proc/devices.c
47824+++ b/fs/proc/devices.c
47825@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
47826
47827 static int __init proc_devices_init(void)
47828 {
47829+#ifdef CONFIG_GRKERNSEC_PROC_ADD
47830+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
47831+#else
47832 proc_create("devices", 0, NULL, &proc_devinfo_operations);
47833+#endif
47834 return 0;
47835 }
47836 module_init(proc_devices_init);
47837diff --git a/fs/proc/inode.c b/fs/proc/inode.c
47838index 205c922..2ee4c57 100644
47839--- a/fs/proc/inode.c
47840+++ b/fs/proc/inode.c
47841@@ -21,11 +21,17 @@
47842 #include <linux/seq_file.h>
47843 #include <linux/slab.h>
47844 #include <linux/mount.h>
47845+#include <linux/grsecurity.h>
47846
47847 #include <asm/uaccess.h>
47848
47849 #include "internal.h"
47850
47851+#ifdef CONFIG_PROC_SYSCTL
47852+extern const struct inode_operations proc_sys_inode_operations;
47853+extern const struct inode_operations proc_sys_dir_operations;
47854+#endif
47855+
47856 static void proc_evict_inode(struct inode *inode)
47857 {
47858 struct proc_dir_entry *de;
47859@@ -51,6 +57,13 @@ static void proc_evict_inode(struct inode *inode)
47860 ns_ops = PROC_I(inode)->ns_ops;
47861 if (ns_ops && ns_ops->put)
47862 ns_ops->put(PROC_I(inode)->ns);
47863+
47864+#ifdef CONFIG_PROC_SYSCTL
47865+ if (inode->i_op == &proc_sys_inode_operations ||
47866+ inode->i_op == &proc_sys_dir_operations)
47867+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
47868+#endif
47869+
47870 }
47871
47872 static struct kmem_cache * proc_inode_cachep;
47873@@ -456,7 +469,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
47874 if (de->mode) {
47875 inode->i_mode = de->mode;
47876 inode->i_uid = de->uid;
47877+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47878+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47879+#else
47880 inode->i_gid = de->gid;
47881+#endif
47882 }
47883 if (de->size)
47884 inode->i_size = de->size;
47885diff --git a/fs/proc/internal.h b/fs/proc/internal.h
47886index 5f79bb8..eeccee4 100644
47887--- a/fs/proc/internal.h
47888+++ b/fs/proc/internal.h
47889@@ -54,6 +54,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47890 struct pid *pid, struct task_struct *task);
47891 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47892 struct pid *pid, struct task_struct *task);
47893+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47894+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
47895+#endif
47896 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
47897
47898 extern const struct file_operations proc_pid_maps_operations;
47899diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
47900index 86c67ee..cdca321 100644
47901--- a/fs/proc/kcore.c
47902+++ b/fs/proc/kcore.c
47903@@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47904 * the addresses in the elf_phdr on our list.
47905 */
47906 start = kc_offset_to_vaddr(*fpos - elf_buflen);
47907- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
47908+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
47909+ if (tsz > buflen)
47910 tsz = buflen;
47911-
47912+
47913 while (buflen) {
47914 struct kcore_list *m;
47915
47916@@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47917 kfree(elf_buf);
47918 } else {
47919 if (kern_addr_valid(start)) {
47920- unsigned long n;
47921+ char *elf_buf;
47922+ mm_segment_t oldfs;
47923
47924- n = copy_to_user(buffer, (char *)start, tsz);
47925- /*
47926- * We cannot distinguish between fault on source
47927- * and fault on destination. When this happens
47928- * we clear too and hope it will trigger the
47929- * EFAULT again.
47930- */
47931- if (n) {
47932- if (clear_user(buffer + tsz - n,
47933- n))
47934+ elf_buf = kmalloc(tsz, GFP_KERNEL);
47935+ if (!elf_buf)
47936+ return -ENOMEM;
47937+ oldfs = get_fs();
47938+ set_fs(KERNEL_DS);
47939+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
47940+ set_fs(oldfs);
47941+ if (copy_to_user(buffer, elf_buf, tsz)) {
47942+ kfree(elf_buf);
47943 return -EFAULT;
47944+ }
47945 }
47946+ set_fs(oldfs);
47947+ kfree(elf_buf);
47948 } else {
47949 if (clear_user(buffer, tsz))
47950 return -EFAULT;
47951@@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47952
47953 static int open_kcore(struct inode *inode, struct file *filp)
47954 {
47955+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
47956+ return -EPERM;
47957+#endif
47958 if (!capable(CAP_SYS_RAWIO))
47959 return -EPERM;
47960 if (kcore_need_update)
47961diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
47962index 80e4645..53e5fcf 100644
47963--- a/fs/proc/meminfo.c
47964+++ b/fs/proc/meminfo.c
47965@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
47966 vmi.used >> 10,
47967 vmi.largest_chunk >> 10
47968 #ifdef CONFIG_MEMORY_FAILURE
47969- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
47970+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
47971 #endif
47972 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
47973 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
47974diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
47975index b1822dd..df622cb 100644
47976--- a/fs/proc/nommu.c
47977+++ b/fs/proc/nommu.c
47978@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
47979 if (len < 1)
47980 len = 1;
47981 seq_printf(m, "%*c", len, ' ');
47982- seq_path(m, &file->f_path, "");
47983+ seq_path(m, &file->f_path, "\n\\");
47984 }
47985
47986 seq_putc(m, '\n');
47987diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
47988index 06e1cc1..177cd98 100644
47989--- a/fs/proc/proc_net.c
47990+++ b/fs/proc/proc_net.c
47991@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
47992 struct task_struct *task;
47993 struct nsproxy *ns;
47994 struct net *net = NULL;
47995+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47996+ const struct cred *cred = current_cred();
47997+#endif
47998+
47999+#ifdef CONFIG_GRKERNSEC_PROC_USER
48000+ if (cred->fsuid)
48001+ return net;
48002+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48003+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
48004+ return net;
48005+#endif
48006
48007 rcu_read_lock();
48008 task = pid_task(proc_pid(dir), PIDTYPE_PID);
48009diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
48010index 21d836f..bebf3ee 100644
48011--- a/fs/proc/proc_sysctl.c
48012+++ b/fs/proc/proc_sysctl.c
48013@@ -12,11 +12,15 @@
48014 #include <linux/module.h>
48015 #include "internal.h"
48016
48017+extern int gr_handle_chroot_sysctl(const int op);
48018+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
48019+ const int op);
48020+
48021 static const struct dentry_operations proc_sys_dentry_operations;
48022 static const struct file_operations proc_sys_file_operations;
48023-static const struct inode_operations proc_sys_inode_operations;
48024+const struct inode_operations proc_sys_inode_operations;
48025 static const struct file_operations proc_sys_dir_file_operations;
48026-static const struct inode_operations proc_sys_dir_operations;
48027+const struct inode_operations proc_sys_dir_operations;
48028
48029 void proc_sys_poll_notify(struct ctl_table_poll *poll)
48030 {
48031@@ -470,8 +474,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
48032
48033 err = NULL;
48034 d_set_d_op(dentry, &proc_sys_dentry_operations);
48035+
48036+ gr_handle_proc_create(dentry, inode);
48037+
48038 d_add(dentry, inode);
48039
48040+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt))
48041+ err = ERR_PTR(-ENOENT);
48042+
48043 out:
48044 sysctl_head_finish(head);
48045 return err;
48046@@ -483,18 +493,20 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
48047 struct inode *inode = filp->f_path.dentry->d_inode;
48048 struct ctl_table_header *head = grab_header(inode);
48049 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
48050+ int op = write ? MAY_WRITE : MAY_READ;
48051 ssize_t error;
48052 size_t res;
48053
48054 if (IS_ERR(head))
48055 return PTR_ERR(head);
48056
48057+
48058 /*
48059 * At this point we know that the sysctl was not unregistered
48060 * and won't be until we finish.
48061 */
48062 error = -EPERM;
48063- if (sysctl_perm(head->root, table, write ? MAY_WRITE : MAY_READ))
48064+ if (sysctl_perm(head->root, table, op))
48065 goto out;
48066
48067 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
48068@@ -502,6 +514,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
48069 if (!table->proc_handler)
48070 goto out;
48071
48072+#ifdef CONFIG_GRKERNSEC
48073+ error = -EPERM;
48074+ if (gr_handle_chroot_sysctl(op))
48075+ goto out;
48076+ dget(filp->f_path.dentry);
48077+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
48078+ dput(filp->f_path.dentry);
48079+ goto out;
48080+ }
48081+ dput(filp->f_path.dentry);
48082+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
48083+ goto out;
48084+ if (write && !capable(CAP_SYS_ADMIN))
48085+ goto out;
48086+#endif
48087+
48088 /* careful: calling conventions are nasty here */
48089 res = count;
48090 error = table->proc_handler(table, write, buf, &res, ppos);
48091@@ -599,6 +627,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
48092 return -ENOMEM;
48093 } else {
48094 d_set_d_op(child, &proc_sys_dentry_operations);
48095+
48096+ gr_handle_proc_create(child, inode);
48097+
48098 d_add(child, inode);
48099 }
48100 } else {
48101@@ -642,6 +673,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
48102 if ((*pos)++ < file->f_pos)
48103 return 0;
48104
48105+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
48106+ return 0;
48107+
48108 if (unlikely(S_ISLNK(table->mode)))
48109 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
48110 else
48111@@ -759,6 +793,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
48112 if (IS_ERR(head))
48113 return PTR_ERR(head);
48114
48115+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
48116+ return -ENOENT;
48117+
48118 generic_fillattr(inode, stat);
48119 if (table)
48120 stat->mode = (stat->mode & S_IFMT) | table->mode;
48121@@ -781,13 +818,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
48122 .llseek = generic_file_llseek,
48123 };
48124
48125-static const struct inode_operations proc_sys_inode_operations = {
48126+const struct inode_operations proc_sys_inode_operations = {
48127 .permission = proc_sys_permission,
48128 .setattr = proc_sys_setattr,
48129 .getattr = proc_sys_getattr,
48130 };
48131
48132-static const struct inode_operations proc_sys_dir_operations = {
48133+const struct inode_operations proc_sys_dir_operations = {
48134 .lookup = proc_sys_lookup,
48135 .permission = proc_sys_permission,
48136 .setattr = proc_sys_setattr,
48137diff --git a/fs/proc/root.c b/fs/proc/root.c
48138index eed44bf..abeb499 100644
48139--- a/fs/proc/root.c
48140+++ b/fs/proc/root.c
48141@@ -188,7 +188,15 @@ void __init proc_root_init(void)
48142 #ifdef CONFIG_PROC_DEVICETREE
48143 proc_device_tree_init();
48144 #endif
48145+#ifdef CONFIG_GRKERNSEC_PROC_ADD
48146+#ifdef CONFIG_GRKERNSEC_PROC_USER
48147+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
48148+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48149+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
48150+#endif
48151+#else
48152 proc_mkdir("bus", NULL);
48153+#endif
48154 proc_sys_init();
48155 }
48156
48157diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
48158index 7faaf2a..096c28b 100644
48159--- a/fs/proc/task_mmu.c
48160+++ b/fs/proc/task_mmu.c
48161@@ -11,12 +11,19 @@
48162 #include <linux/rmap.h>
48163 #include <linux/swap.h>
48164 #include <linux/swapops.h>
48165+#include <linux/grsecurity.h>
48166
48167 #include <asm/elf.h>
48168 #include <asm/uaccess.h>
48169 #include <asm/tlbflush.h>
48170 #include "internal.h"
48171
48172+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48173+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48174+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
48175+ _mm->pax_flags & MF_PAX_SEGMEXEC))
48176+#endif
48177+
48178 void task_mem(struct seq_file *m, struct mm_struct *mm)
48179 {
48180 unsigned long data, text, lib, swap;
48181@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48182 "VmExe:\t%8lu kB\n"
48183 "VmLib:\t%8lu kB\n"
48184 "VmPTE:\t%8lu kB\n"
48185- "VmSwap:\t%8lu kB\n",
48186- hiwater_vm << (PAGE_SHIFT-10),
48187+ "VmSwap:\t%8lu kB\n"
48188+
48189+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48190+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
48191+#endif
48192+
48193+ ,hiwater_vm << (PAGE_SHIFT-10),
48194 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
48195 mm->locked_vm << (PAGE_SHIFT-10),
48196 mm->pinned_vm << (PAGE_SHIFT-10),
48197@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48198 data << (PAGE_SHIFT-10),
48199 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
48200 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
48201- swap << (PAGE_SHIFT-10));
48202+ swap << (PAGE_SHIFT-10)
48203+
48204+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48205+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48206+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
48207+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
48208+#else
48209+ , mm->context.user_cs_base
48210+ , mm->context.user_cs_limit
48211+#endif
48212+#endif
48213+
48214+ );
48215 }
48216
48217 unsigned long task_vsize(struct mm_struct *mm)
48218@@ -231,13 +255,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48219 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
48220 }
48221
48222- /* We don't show the stack guard page in /proc/maps */
48223+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48224+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
48225+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
48226+#else
48227 start = vma->vm_start;
48228- if (stack_guard_page_start(vma, start))
48229- start += PAGE_SIZE;
48230 end = vma->vm_end;
48231- if (stack_guard_page_end(vma, end))
48232- end -= PAGE_SIZE;
48233+#endif
48234
48235 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
48236 start,
48237@@ -246,7 +270,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48238 flags & VM_WRITE ? 'w' : '-',
48239 flags & VM_EXEC ? 'x' : '-',
48240 flags & VM_MAYSHARE ? 's' : 'p',
48241+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48242+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
48243+#else
48244 pgoff,
48245+#endif
48246 MAJOR(dev), MINOR(dev), ino, &len);
48247
48248 /*
48249@@ -255,7 +283,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48250 */
48251 if (file) {
48252 pad_len_spaces(m, len);
48253- seq_path(m, &file->f_path, "\n");
48254+ seq_path(m, &file->f_path, "\n\\");
48255 goto done;
48256 }
48257
48258@@ -281,8 +309,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48259 * Thread stack in /proc/PID/task/TID/maps or
48260 * the main process stack.
48261 */
48262- if (!is_pid || (vma->vm_start <= mm->start_stack &&
48263- vma->vm_end >= mm->start_stack)) {
48264+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
48265+ (vma->vm_start <= mm->start_stack &&
48266+ vma->vm_end >= mm->start_stack)) {
48267 name = "[stack]";
48268 } else {
48269 /* Thread stack in /proc/PID/maps */
48270@@ -306,6 +335,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
48271 struct proc_maps_private *priv = m->private;
48272 struct task_struct *task = priv->task;
48273
48274+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48275+ if (current->exec_id != m->exec_id) {
48276+ gr_log_badprocpid("maps");
48277+ return 0;
48278+ }
48279+#endif
48280+
48281 show_map_vma(m, vma, is_pid);
48282
48283 if (m->count < m->size) /* vma is copied successfully */
48284@@ -482,12 +518,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
48285 .private = &mss,
48286 };
48287
48288+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48289+ if (current->exec_id != m->exec_id) {
48290+ gr_log_badprocpid("smaps");
48291+ return 0;
48292+ }
48293+#endif
48294 memset(&mss, 0, sizeof mss);
48295- mss.vma = vma;
48296- /* mmap_sem is held in m_start */
48297- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48298- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48299-
48300+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48301+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
48302+#endif
48303+ mss.vma = vma;
48304+ /* mmap_sem is held in m_start */
48305+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48306+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48307+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48308+ }
48309+#endif
48310 show_map_vma(m, vma, is_pid);
48311
48312 seq_printf(m,
48313@@ -505,7 +552,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
48314 "KernelPageSize: %8lu kB\n"
48315 "MMUPageSize: %8lu kB\n"
48316 "Locked: %8lu kB\n",
48317+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48318+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
48319+#else
48320 (vma->vm_end - vma->vm_start) >> 10,
48321+#endif
48322 mss.resident >> 10,
48323 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
48324 mss.shared_clean >> 10,
48325@@ -1138,6 +1189,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
48326 int n;
48327 char buffer[50];
48328
48329+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48330+ if (current->exec_id != m->exec_id) {
48331+ gr_log_badprocpid("numa_maps");
48332+ return 0;
48333+ }
48334+#endif
48335+
48336 if (!mm)
48337 return 0;
48338
48339@@ -1155,11 +1213,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
48340 mpol_to_str(buffer, sizeof(buffer), pol, 0);
48341 mpol_cond_put(pol);
48342
48343+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48344+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
48345+#else
48346 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
48347+#endif
48348
48349 if (file) {
48350 seq_printf(m, " file=");
48351- seq_path(m, &file->f_path, "\n\t= ");
48352+ seq_path(m, &file->f_path, "\n\t\\= ");
48353 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
48354 seq_printf(m, " heap");
48355 } else {
48356diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
48357index 74fe164..899e77b 100644
48358--- a/fs/proc/task_nommu.c
48359+++ b/fs/proc/task_nommu.c
48360@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48361 else
48362 bytes += kobjsize(mm);
48363
48364- if (current->fs && current->fs->users > 1)
48365+ if (current->fs && atomic_read(&current->fs->users) > 1)
48366 sbytes += kobjsize(current->fs);
48367 else
48368 bytes += kobjsize(current->fs);
48369@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
48370
48371 if (file) {
48372 pad_len_spaces(m, len);
48373- seq_path(m, &file->f_path, "");
48374+ seq_path(m, &file->f_path, "\n\\");
48375 } else if (mm) {
48376 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
48377
48378diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
48379index d67908b..d13f6a6 100644
48380--- a/fs/quota/netlink.c
48381+++ b/fs/quota/netlink.c
48382@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
48383 void quota_send_warning(short type, unsigned int id, dev_t dev,
48384 const char warntype)
48385 {
48386- static atomic_t seq;
48387+ static atomic_unchecked_t seq;
48388 struct sk_buff *skb;
48389 void *msg_head;
48390 int ret;
48391@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
48392 "VFS: Not enough memory to send quota warning.\n");
48393 return;
48394 }
48395- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
48396+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
48397 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
48398 if (!msg_head) {
48399 printk(KERN_ERR
48400diff --git a/fs/readdir.c b/fs/readdir.c
48401index cc0a822..43cb195 100644
48402--- a/fs/readdir.c
48403+++ b/fs/readdir.c
48404@@ -17,6 +17,7 @@
48405 #include <linux/security.h>
48406 #include <linux/syscalls.h>
48407 #include <linux/unistd.h>
48408+#include <linux/namei.h>
48409
48410 #include <asm/uaccess.h>
48411
48412@@ -67,6 +68,7 @@ struct old_linux_dirent {
48413
48414 struct readdir_callback {
48415 struct old_linux_dirent __user * dirent;
48416+ struct file * file;
48417 int result;
48418 };
48419
48420@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
48421 buf->result = -EOVERFLOW;
48422 return -EOVERFLOW;
48423 }
48424+
48425+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48426+ return 0;
48427+
48428 buf->result++;
48429 dirent = buf->dirent;
48430 if (!access_ok(VERIFY_WRITE, dirent,
48431@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
48432
48433 buf.result = 0;
48434 buf.dirent = dirent;
48435+ buf.file = file;
48436
48437 error = vfs_readdir(file, fillonedir, &buf);
48438 if (buf.result)
48439@@ -142,6 +149,7 @@ struct linux_dirent {
48440 struct getdents_callback {
48441 struct linux_dirent __user * current_dir;
48442 struct linux_dirent __user * previous;
48443+ struct file * file;
48444 int count;
48445 int error;
48446 };
48447@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
48448 buf->error = -EOVERFLOW;
48449 return -EOVERFLOW;
48450 }
48451+
48452+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48453+ return 0;
48454+
48455 dirent = buf->previous;
48456 if (dirent) {
48457 if (__put_user(offset, &dirent->d_off))
48458@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
48459 buf.previous = NULL;
48460 buf.count = count;
48461 buf.error = 0;
48462+ buf.file = file;
48463
48464 error = vfs_readdir(file, filldir, &buf);
48465 if (error >= 0)
48466@@ -229,6 +242,7 @@ out:
48467 struct getdents_callback64 {
48468 struct linux_dirent64 __user * current_dir;
48469 struct linux_dirent64 __user * previous;
48470+ struct file *file;
48471 int count;
48472 int error;
48473 };
48474@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
48475 buf->error = -EINVAL; /* only used if we fail.. */
48476 if (reclen > buf->count)
48477 return -EINVAL;
48478+
48479+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48480+ return 0;
48481+
48482 dirent = buf->previous;
48483 if (dirent) {
48484 if (__put_user(offset, &dirent->d_off))
48485@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48486
48487 buf.current_dir = dirent;
48488 buf.previous = NULL;
48489+ buf.file = file;
48490 buf.count = count;
48491 buf.error = 0;
48492
48493@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48494 error = buf.error;
48495 lastdirent = buf.previous;
48496 if (lastdirent) {
48497- typeof(lastdirent->d_off) d_off = file->f_pos;
48498+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48499 if (__put_user(d_off, &lastdirent->d_off))
48500 error = -EFAULT;
48501 else
48502diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
48503index 2b7882b..1c5ef48 100644
48504--- a/fs/reiserfs/do_balan.c
48505+++ b/fs/reiserfs/do_balan.c
48506@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
48507 return;
48508 }
48509
48510- atomic_inc(&(fs_generation(tb->tb_sb)));
48511+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
48512 do_balance_starts(tb);
48513
48514 /* balance leaf returns 0 except if combining L R and S into
48515diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
48516index 2c1ade6..8c59d8d 100644
48517--- a/fs/reiserfs/procfs.c
48518+++ b/fs/reiserfs/procfs.c
48519@@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
48520 "SMALL_TAILS " : "NO_TAILS ",
48521 replay_only(sb) ? "REPLAY_ONLY " : "",
48522 convert_reiserfs(sb) ? "CONV " : "",
48523- atomic_read(&r->s_generation_counter),
48524+ atomic_read_unchecked(&r->s_generation_counter),
48525 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
48526 SF(s_do_balance), SF(s_unneeded_left_neighbor),
48527 SF(s_good_search_by_key_reada), SF(s_bmaps),
48528diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
48529index a59d271..e12d1cf 100644
48530--- a/fs/reiserfs/reiserfs.h
48531+++ b/fs/reiserfs/reiserfs.h
48532@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
48533 /* Comment? -Hans */
48534 wait_queue_head_t s_wait;
48535 /* To be obsoleted soon by per buffer seals.. -Hans */
48536- atomic_t s_generation_counter; // increased by one every time the
48537+ atomic_unchecked_t s_generation_counter; // increased by one every time the
48538 // tree gets re-balanced
48539 unsigned long s_properties; /* File system properties. Currently holds
48540 on-disk FS format */
48541@@ -1973,7 +1973,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
48542 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
48543
48544 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
48545-#define get_generation(s) atomic_read (&fs_generation(s))
48546+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
48547 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
48548 #define __fs_changed(gen,s) (gen != get_generation (s))
48549 #define fs_changed(gen,s) \
48550diff --git a/fs/select.c b/fs/select.c
48551index 17d33d0..da0bf5c 100644
48552--- a/fs/select.c
48553+++ b/fs/select.c
48554@@ -20,6 +20,7 @@
48555 #include <linux/export.h>
48556 #include <linux/slab.h>
48557 #include <linux/poll.h>
48558+#include <linux/security.h>
48559 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
48560 #include <linux/file.h>
48561 #include <linux/fdtable.h>
48562@@ -833,6 +834,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
48563 struct poll_list *walk = head;
48564 unsigned long todo = nfds;
48565
48566+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
48567 if (nfds > rlimit(RLIMIT_NOFILE))
48568 return -EINVAL;
48569
48570diff --git a/fs/seq_file.c b/fs/seq_file.c
48571index 0cbd049..cab1127 100644
48572--- a/fs/seq_file.c
48573+++ b/fs/seq_file.c
48574@@ -9,6 +9,7 @@
48575 #include <linux/export.h>
48576 #include <linux/seq_file.h>
48577 #include <linux/slab.h>
48578+#include <linux/sched.h>
48579
48580 #include <asm/uaccess.h>
48581 #include <asm/page.h>
48582@@ -56,6 +57,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
48583 memset(p, 0, sizeof(*p));
48584 mutex_init(&p->lock);
48585 p->op = op;
48586+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48587+ p->exec_id = current->exec_id;
48588+#endif
48589
48590 /*
48591 * Wrappers around seq_open(e.g. swaps_open) need to be
48592@@ -567,7 +571,7 @@ static void single_stop(struct seq_file *p, void *v)
48593 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
48594 void *data)
48595 {
48596- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
48597+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
48598 int res = -ENOMEM;
48599
48600 if (op) {
48601diff --git a/fs/splice.c b/fs/splice.c
48602index f847684..156619e 100644
48603--- a/fs/splice.c
48604+++ b/fs/splice.c
48605@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
48606 pipe_lock(pipe);
48607
48608 for (;;) {
48609- if (!pipe->readers) {
48610+ if (!atomic_read(&pipe->readers)) {
48611 send_sig(SIGPIPE, current, 0);
48612 if (!ret)
48613 ret = -EPIPE;
48614@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
48615 do_wakeup = 0;
48616 }
48617
48618- pipe->waiting_writers++;
48619+ atomic_inc(&pipe->waiting_writers);
48620 pipe_wait(pipe);
48621- pipe->waiting_writers--;
48622+ atomic_dec(&pipe->waiting_writers);
48623 }
48624
48625 pipe_unlock(pipe);
48626@@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
48627 old_fs = get_fs();
48628 set_fs(get_ds());
48629 /* The cast to a user pointer is valid due to the set_fs() */
48630- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
48631+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
48632 set_fs(old_fs);
48633
48634 return res;
48635@@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
48636 old_fs = get_fs();
48637 set_fs(get_ds());
48638 /* The cast to a user pointer is valid due to the set_fs() */
48639- res = vfs_write(file, (const char __user *)buf, count, &pos);
48640+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
48641 set_fs(old_fs);
48642
48643 return res;
48644@@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
48645 goto err;
48646
48647 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
48648- vec[i].iov_base = (void __user *) page_address(page);
48649+ vec[i].iov_base = (void __force_user *) page_address(page);
48650 vec[i].iov_len = this_len;
48651 spd.pages[i] = page;
48652 spd.nr_pages++;
48653@@ -845,10 +845,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
48654 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
48655 {
48656 while (!pipe->nrbufs) {
48657- if (!pipe->writers)
48658+ if (!atomic_read(&pipe->writers))
48659 return 0;
48660
48661- if (!pipe->waiting_writers && sd->num_spliced)
48662+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
48663 return 0;
48664
48665 if (sd->flags & SPLICE_F_NONBLOCK)
48666@@ -1181,7 +1181,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
48667 * out of the pipe right after the splice_to_pipe(). So set
48668 * PIPE_READERS appropriately.
48669 */
48670- pipe->readers = 1;
48671+ atomic_set(&pipe->readers, 1);
48672
48673 current->splice_pipe = pipe;
48674 }
48675@@ -1733,9 +1733,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48676 ret = -ERESTARTSYS;
48677 break;
48678 }
48679- if (!pipe->writers)
48680+ if (!atomic_read(&pipe->writers))
48681 break;
48682- if (!pipe->waiting_writers) {
48683+ if (!atomic_read(&pipe->waiting_writers)) {
48684 if (flags & SPLICE_F_NONBLOCK) {
48685 ret = -EAGAIN;
48686 break;
48687@@ -1767,7 +1767,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48688 pipe_lock(pipe);
48689
48690 while (pipe->nrbufs >= pipe->buffers) {
48691- if (!pipe->readers) {
48692+ if (!atomic_read(&pipe->readers)) {
48693 send_sig(SIGPIPE, current, 0);
48694 ret = -EPIPE;
48695 break;
48696@@ -1780,9 +1780,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48697 ret = -ERESTARTSYS;
48698 break;
48699 }
48700- pipe->waiting_writers++;
48701+ atomic_inc(&pipe->waiting_writers);
48702 pipe_wait(pipe);
48703- pipe->waiting_writers--;
48704+ atomic_dec(&pipe->waiting_writers);
48705 }
48706
48707 pipe_unlock(pipe);
48708@@ -1818,14 +1818,14 @@ retry:
48709 pipe_double_lock(ipipe, opipe);
48710
48711 do {
48712- if (!opipe->readers) {
48713+ if (!atomic_read(&opipe->readers)) {
48714 send_sig(SIGPIPE, current, 0);
48715 if (!ret)
48716 ret = -EPIPE;
48717 break;
48718 }
48719
48720- if (!ipipe->nrbufs && !ipipe->writers)
48721+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
48722 break;
48723
48724 /*
48725@@ -1922,7 +1922,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
48726 pipe_double_lock(ipipe, opipe);
48727
48728 do {
48729- if (!opipe->readers) {
48730+ if (!atomic_read(&opipe->readers)) {
48731 send_sig(SIGPIPE, current, 0);
48732 if (!ret)
48733 ret = -EPIPE;
48734@@ -1967,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
48735 * return EAGAIN if we have the potential of some data in the
48736 * future, otherwise just return 0
48737 */
48738- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
48739+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
48740 ret = -EAGAIN;
48741
48742 pipe_unlock(ipipe);
48743diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
48744index 35a36d3..23424b2 100644
48745--- a/fs/sysfs/dir.c
48746+++ b/fs/sysfs/dir.c
48747@@ -657,6 +657,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
48748 struct sysfs_dirent *sd;
48749 int rc;
48750
48751+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
48752+ const char *parent_name = parent_sd->s_name;
48753+
48754+ mode = S_IFDIR | S_IRWXU;
48755+
48756+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
48757+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
48758+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
48759+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
48760+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
48761+#endif
48762+
48763 /* allocate */
48764 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
48765 if (!sd)
48766diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
48767index 00012e3..8392349 100644
48768--- a/fs/sysfs/file.c
48769+++ b/fs/sysfs/file.c
48770@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
48771
48772 struct sysfs_open_dirent {
48773 atomic_t refcnt;
48774- atomic_t event;
48775+ atomic_unchecked_t event;
48776 wait_queue_head_t poll;
48777 struct list_head buffers; /* goes through sysfs_buffer.list */
48778 };
48779@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
48780 if (!sysfs_get_active(attr_sd))
48781 return -ENODEV;
48782
48783- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
48784+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
48785 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
48786
48787 sysfs_put_active(attr_sd);
48788@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
48789 return -ENOMEM;
48790
48791 atomic_set(&new_od->refcnt, 0);
48792- atomic_set(&new_od->event, 1);
48793+ atomic_set_unchecked(&new_od->event, 1);
48794 init_waitqueue_head(&new_od->poll);
48795 INIT_LIST_HEAD(&new_od->buffers);
48796 goto retry;
48797@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
48798
48799 sysfs_put_active(attr_sd);
48800
48801- if (buffer->event != atomic_read(&od->event))
48802+ if (buffer->event != atomic_read_unchecked(&od->event))
48803 goto trigger;
48804
48805 return DEFAULT_POLLMASK;
48806@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
48807
48808 od = sd->s_attr.open;
48809 if (od) {
48810- atomic_inc(&od->event);
48811+ atomic_inc_unchecked(&od->event);
48812 wake_up_interruptible(&od->poll);
48813 }
48814
48815diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
48816index a7ac78f..02158e1 100644
48817--- a/fs/sysfs/symlink.c
48818+++ b/fs/sysfs/symlink.c
48819@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48820
48821 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
48822 {
48823- char *page = nd_get_link(nd);
48824+ const char *page = nd_get_link(nd);
48825 if (!IS_ERR(page))
48826 free_page((unsigned long)page);
48827 }
48828diff --git a/fs/udf/misc.c b/fs/udf/misc.c
48829index c175b4d..8f36a16 100644
48830--- a/fs/udf/misc.c
48831+++ b/fs/udf/misc.c
48832@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
48833
48834 u8 udf_tag_checksum(const struct tag *t)
48835 {
48836- u8 *data = (u8 *)t;
48837+ const u8 *data = (const u8 *)t;
48838 u8 checksum = 0;
48839 int i;
48840 for (i = 0; i < sizeof(struct tag); ++i)
48841diff --git a/fs/utimes.c b/fs/utimes.c
48842index ba653f3..06ea4b1 100644
48843--- a/fs/utimes.c
48844+++ b/fs/utimes.c
48845@@ -1,6 +1,7 @@
48846 #include <linux/compiler.h>
48847 #include <linux/file.h>
48848 #include <linux/fs.h>
48849+#include <linux/security.h>
48850 #include <linux/linkage.h>
48851 #include <linux/mount.h>
48852 #include <linux/namei.h>
48853@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
48854 goto mnt_drop_write_and_out;
48855 }
48856 }
48857+
48858+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
48859+ error = -EACCES;
48860+ goto mnt_drop_write_and_out;
48861+ }
48862+
48863 mutex_lock(&inode->i_mutex);
48864 error = notify_change(path->dentry, &newattrs);
48865 mutex_unlock(&inode->i_mutex);
48866diff --git a/fs/xattr.c b/fs/xattr.c
48867index 3c8c1cc..a83c398 100644
48868--- a/fs/xattr.c
48869+++ b/fs/xattr.c
48870@@ -316,7 +316,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
48871 * Extended attribute SET operations
48872 */
48873 static long
48874-setxattr(struct dentry *d, const char __user *name, const void __user *value,
48875+setxattr(struct path *path, const char __user *name, const void __user *value,
48876 size_t size, int flags)
48877 {
48878 int error;
48879@@ -349,7 +349,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
48880 }
48881 }
48882
48883- error = vfs_setxattr(d, kname, kvalue, size, flags);
48884+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
48885+ error = -EACCES;
48886+ goto out;
48887+ }
48888+
48889+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
48890 out:
48891 if (vvalue)
48892 vfree(vvalue);
48893@@ -370,7 +375,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
48894 return error;
48895 error = mnt_want_write(path.mnt);
48896 if (!error) {
48897- error = setxattr(path.dentry, name, value, size, flags);
48898+ error = setxattr(&path, name, value, size, flags);
48899 mnt_drop_write(path.mnt);
48900 }
48901 path_put(&path);
48902@@ -389,7 +394,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
48903 return error;
48904 error = mnt_want_write(path.mnt);
48905 if (!error) {
48906- error = setxattr(path.dentry, name, value, size, flags);
48907+ error = setxattr(&path, name, value, size, flags);
48908 mnt_drop_write(path.mnt);
48909 }
48910 path_put(&path);
48911@@ -400,17 +405,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
48912 const void __user *,value, size_t, size, int, flags)
48913 {
48914 struct file *f;
48915- struct dentry *dentry;
48916 int error = -EBADF;
48917
48918 f = fget(fd);
48919 if (!f)
48920 return error;
48921- dentry = f->f_path.dentry;
48922- audit_inode(NULL, dentry);
48923+ audit_inode(NULL, f->f_path.dentry);
48924 error = mnt_want_write_file(f);
48925 if (!error) {
48926- error = setxattr(dentry, name, value, size, flags);
48927+ error = setxattr(&f->f_path, name, value, size, flags);
48928 mnt_drop_write_file(f);
48929 }
48930 fput(f);
48931diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
48932index 69d06b0..c0996e5 100644
48933--- a/fs/xattr_acl.c
48934+++ b/fs/xattr_acl.c
48935@@ -17,8 +17,8 @@
48936 struct posix_acl *
48937 posix_acl_from_xattr(const void *value, size_t size)
48938 {
48939- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
48940- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
48941+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
48942+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
48943 int count;
48944 struct posix_acl *acl;
48945 struct posix_acl_entry *acl_e;
48946diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
48947index 85e7e32..5344e52 100644
48948--- a/fs/xfs/xfs_bmap.c
48949+++ b/fs/xfs/xfs_bmap.c
48950@@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
48951 int nmap,
48952 int ret_nmap);
48953 #else
48954-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
48955+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
48956 #endif /* DEBUG */
48957
48958 STATIC int
48959diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
48960index 79d05e8..e3e5861 100644
48961--- a/fs/xfs/xfs_dir2_sf.c
48962+++ b/fs/xfs/xfs_dir2_sf.c
48963@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
48964 }
48965
48966 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
48967- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
48968+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
48969+ char name[sfep->namelen];
48970+ memcpy(name, sfep->name, sfep->namelen);
48971+ if (filldir(dirent, name, sfep->namelen,
48972+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
48973+ *offset = off & 0x7fffffff;
48974+ return 0;
48975+ }
48976+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
48977 off & 0x7fffffff, ino, DT_UNKNOWN)) {
48978 *offset = off & 0x7fffffff;
48979 return 0;
48980diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
48981index 91f8ff5..0ce68f9 100644
48982--- a/fs/xfs/xfs_ioctl.c
48983+++ b/fs/xfs/xfs_ioctl.c
48984@@ -128,7 +128,7 @@ xfs_find_handle(
48985 }
48986
48987 error = -EFAULT;
48988- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
48989+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
48990 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
48991 goto out_put;
48992
48993diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
48994index 3011b87..1ab03e9 100644
48995--- a/fs/xfs/xfs_iops.c
48996+++ b/fs/xfs/xfs_iops.c
48997@@ -397,7 +397,7 @@ xfs_vn_put_link(
48998 struct nameidata *nd,
48999 void *p)
49000 {
49001- char *s = nd_get_link(nd);
49002+ const char *s = nd_get_link(nd);
49003
49004 if (!IS_ERR(s))
49005 kfree(s);
49006diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
49007new file mode 100644
49008index 0000000..2645296
49009--- /dev/null
49010+++ b/grsecurity/Kconfig
49011@@ -0,0 +1,1079 @@
49012+#
49013+# grecurity configuration
49014+#
49015+
49016+menu "Grsecurity"
49017+
49018+config GRKERNSEC
49019+ bool "Grsecurity"
49020+ select CRYPTO
49021+ select CRYPTO_SHA256
49022+ help
49023+ If you say Y here, you will be able to configure many features
49024+ that will enhance the security of your system. It is highly
49025+ recommended that you say Y here and read through the help
49026+ for each option so that you fully understand the features and
49027+ can evaluate their usefulness for your machine.
49028+
49029+choice
49030+ prompt "Security Level"
49031+ depends on GRKERNSEC
49032+ default GRKERNSEC_CUSTOM
49033+
49034+config GRKERNSEC_LOW
49035+ bool "Low"
49036+ select GRKERNSEC_LINK
49037+ select GRKERNSEC_FIFO
49038+ select GRKERNSEC_RANDNET
49039+ select GRKERNSEC_DMESG
49040+ select GRKERNSEC_CHROOT
49041+ select GRKERNSEC_CHROOT_CHDIR
49042+
49043+ help
49044+ If you choose this option, several of the grsecurity options will
49045+ be enabled that will give you greater protection against a number
49046+ of attacks, while assuring that none of your software will have any
49047+ conflicts with the additional security measures. If you run a lot
49048+ of unusual software, or you are having problems with the higher
49049+ security levels, you should say Y here. With this option, the
49050+ following features are enabled:
49051+
49052+ - Linking restrictions
49053+ - FIFO restrictions
49054+ - Restricted dmesg
49055+ - Enforced chdir("/") on chroot
49056+ - Runtime module disabling
49057+
49058+config GRKERNSEC_MEDIUM
49059+ bool "Medium"
49060+ select PAX
49061+ select PAX_EI_PAX
49062+ select PAX_PT_PAX_FLAGS
49063+ select PAX_HAVE_ACL_FLAGS
49064+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49065+ select GRKERNSEC_CHROOT
49066+ select GRKERNSEC_CHROOT_SYSCTL
49067+ select GRKERNSEC_LINK
49068+ select GRKERNSEC_FIFO
49069+ select GRKERNSEC_DMESG
49070+ select GRKERNSEC_RANDNET
49071+ select GRKERNSEC_FORKFAIL
49072+ select GRKERNSEC_TIME
49073+ select GRKERNSEC_SIGNAL
49074+ select GRKERNSEC_CHROOT
49075+ select GRKERNSEC_CHROOT_UNIX
49076+ select GRKERNSEC_CHROOT_MOUNT
49077+ select GRKERNSEC_CHROOT_PIVOT
49078+ select GRKERNSEC_CHROOT_DOUBLE
49079+ select GRKERNSEC_CHROOT_CHDIR
49080+ select GRKERNSEC_CHROOT_MKNOD
49081+ select GRKERNSEC_PROC
49082+ select GRKERNSEC_PROC_USERGROUP
49083+ select PAX_RANDUSTACK
49084+ select PAX_ASLR
49085+ select PAX_RANDMMAP
49086+ select PAX_REFCOUNT if (X86 || SPARC64)
49087+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
49088+
49089+ help
49090+ If you say Y here, several features in addition to those included
49091+ in the low additional security level will be enabled. These
49092+ features provide even more security to your system, though in rare
49093+ cases they may be incompatible with very old or poorly written
49094+ software. If you enable this option, make sure that your auth
49095+ service (identd) is running as gid 1001. With this option,
49096+ the following features (in addition to those provided in the
49097+ low additional security level) will be enabled:
49098+
49099+ - Failed fork logging
49100+ - Time change logging
49101+ - Signal logging
49102+ - Deny mounts in chroot
49103+ - Deny double chrooting
49104+ - Deny sysctl writes in chroot
49105+ - Deny mknod in chroot
49106+ - Deny access to abstract AF_UNIX sockets out of chroot
49107+ - Deny pivot_root in chroot
49108+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
49109+ - /proc restrictions with special GID set to 10 (usually wheel)
49110+ - Address Space Layout Randomization (ASLR)
49111+ - Prevent exploitation of most refcount overflows
49112+ - Bounds checking of copying between the kernel and userland
49113+
49114+config GRKERNSEC_HIGH
49115+ bool "High"
49116+ select GRKERNSEC_LINK
49117+ select GRKERNSEC_FIFO
49118+ select GRKERNSEC_DMESG
49119+ select GRKERNSEC_FORKFAIL
49120+ select GRKERNSEC_TIME
49121+ select GRKERNSEC_SIGNAL
49122+ select GRKERNSEC_CHROOT
49123+ select GRKERNSEC_CHROOT_SHMAT
49124+ select GRKERNSEC_CHROOT_UNIX
49125+ select GRKERNSEC_CHROOT_MOUNT
49126+ select GRKERNSEC_CHROOT_FCHDIR
49127+ select GRKERNSEC_CHROOT_PIVOT
49128+ select GRKERNSEC_CHROOT_DOUBLE
49129+ select GRKERNSEC_CHROOT_CHDIR
49130+ select GRKERNSEC_CHROOT_MKNOD
49131+ select GRKERNSEC_CHROOT_CAPS
49132+ select GRKERNSEC_CHROOT_SYSCTL
49133+ select GRKERNSEC_CHROOT_FINDTASK
49134+ select GRKERNSEC_SYSFS_RESTRICT
49135+ select GRKERNSEC_PROC
49136+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49137+ select GRKERNSEC_HIDESYM
49138+ select GRKERNSEC_BRUTE
49139+ select GRKERNSEC_PROC_USERGROUP
49140+ select GRKERNSEC_KMEM
49141+ select GRKERNSEC_RESLOG
49142+ select GRKERNSEC_RANDNET
49143+ select GRKERNSEC_PROC_ADD
49144+ select GRKERNSEC_CHROOT_CHMOD
49145+ select GRKERNSEC_CHROOT_NICE
49146+ select GRKERNSEC_SETXID if (X86 || SPARC64 || PPC || ARM || MIPS)
49147+ select GRKERNSEC_AUDIT_MOUNT
49148+ select GRKERNSEC_MODHARDEN if (MODULES)
49149+ select GRKERNSEC_HARDEN_PTRACE
49150+ select GRKERNSEC_PTRACE_READEXEC
49151+ select GRKERNSEC_VM86 if (X86_32)
49152+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
49153+ select PAX
49154+ select PAX_RANDUSTACK
49155+ select PAX_ASLR
49156+ select PAX_RANDMMAP
49157+ select PAX_NOEXEC
49158+ select PAX_MPROTECT
49159+ select PAX_EI_PAX
49160+ select PAX_PT_PAX_FLAGS
49161+ select PAX_HAVE_ACL_FLAGS
49162+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
49163+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
49164+ select PAX_RANDKSTACK if (X86_TSC && X86)
49165+ select PAX_SEGMEXEC if (X86_32)
49166+ select PAX_PAGEEXEC
49167+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
49168+ select PAX_EMUTRAMP if (PARISC)
49169+ select PAX_EMUSIGRT if (PARISC)
49170+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
49171+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
49172+ select PAX_REFCOUNT if (X86 || SPARC64)
49173+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
49174+ help
49175+ If you say Y here, many of the features of grsecurity will be
49176+ enabled, which will protect you against many kinds of attacks
49177+ against your system. The heightened security comes at a cost
49178+ of an increased chance of incompatibilities with rare software
49179+ on your machine. Since this security level enables PaX, you should
49180+ view <http://pax.grsecurity.net> and read about the PaX
49181+ project. While you are there, download chpax and run it on
49182+ binaries that cause problems with PaX. Also remember that
49183+ since the /proc restrictions are enabled, you must run your
49184+ identd as gid 1001. This security level enables the following
49185+ features in addition to those listed in the low and medium
49186+ security levels:
49187+
49188+ - Additional /proc restrictions
49189+ - Chmod restrictions in chroot
49190+ - No signals, ptrace, or viewing of processes outside of chroot
49191+ - Capability restrictions in chroot
49192+ - Deny fchdir out of chroot
49193+ - Priority restrictions in chroot
49194+ - Segmentation-based implementation of PaX
49195+ - Mprotect restrictions
49196+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
49197+ - Kernel stack randomization
49198+ - Mount/unmount/remount logging
49199+ - Kernel symbol hiding
49200+ - Hardening of module auto-loading
49201+ - Ptrace restrictions
49202+ - Restricted vm86 mode
49203+ - Restricted sysfs/debugfs
49204+ - Active kernel exploit response
49205+
49206+config GRKERNSEC_CUSTOM
49207+ bool "Custom"
49208+ help
49209+ If you say Y here, you will be able to configure every grsecurity
49210+ option, which allows you to enable many more features that aren't
49211+ covered in the basic security levels. These additional features
49212+ include TPE, socket restrictions, and the sysctl system for
49213+ grsecurity. It is advised that you read through the help for
49214+ each option to determine its usefulness in your situation.
49215+
49216+endchoice
49217+
49218+menu "Memory Protections"
49219+depends on GRKERNSEC
49220+
49221+config GRKERNSEC_KMEM
49222+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
49223+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
49224+ help
49225+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
49226+ be written to or read from to modify or leak the contents of the running
49227+ kernel. /dev/port will also not be allowed to be opened. If you have module
49228+ support disabled, enabling this will close up four ways that are
49229+ currently used to insert malicious code into the running kernel.
49230+ Even with all these features enabled, we still highly recommend that
49231+ you use the RBAC system, as it is still possible for an attacker to
49232+ modify the running kernel through privileged I/O granted by ioperm/iopl.
49233+ If you are not using XFree86, you may be able to stop this additional
49234+ case by enabling the 'Disable privileged I/O' option. Though nothing
49235+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
49236+ but only to video memory, which is the only writing we allow in this
49237+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
49238+ not be allowed to mprotect it with PROT_WRITE later.
49239+ It is highly recommended that you say Y here if you meet all the
49240+ conditions above.
49241+
49242+config GRKERNSEC_VM86
49243+ bool "Restrict VM86 mode"
49244+ depends on X86_32
49245+
49246+ help
49247+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
49248+ make use of a special execution mode on 32bit x86 processors called
49249+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
49250+ video cards and will still work with this option enabled. The purpose
49251+ of the option is to prevent exploitation of emulation errors in
49252+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
49253+ Nearly all users should be able to enable this option.
49254+
49255+config GRKERNSEC_IO
49256+ bool "Disable privileged I/O"
49257+ depends on X86
49258+ select RTC_CLASS
49259+ select RTC_INTF_DEV
49260+ select RTC_DRV_CMOS
49261+
49262+ help
49263+ If you say Y here, all ioperm and iopl calls will return an error.
49264+ Ioperm and iopl can be used to modify the running kernel.
49265+ Unfortunately, some programs need this access to operate properly,
49266+ the most notable of which are XFree86 and hwclock. hwclock can be
49267+ remedied by having RTC support in the kernel, so real-time
49268+ clock support is enabled if this option is enabled, to ensure
49269+ that hwclock operates correctly. XFree86 still will not
49270+ operate correctly with this option enabled, so DO NOT CHOOSE Y
49271+ IF YOU USE XFree86. If you use XFree86 and you still want to
49272+ protect your kernel against modification, use the RBAC system.
49273+
49274+config GRKERNSEC_PROC_MEMMAP
49275+ bool "Harden ASLR against information leaks and entropy reduction"
49276+ default y if (PAX_NOEXEC || PAX_ASLR)
49277+ depends on PAX_NOEXEC || PAX_ASLR
49278+ help
49279+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
49280+ give no information about the addresses of its mappings if
49281+ PaX features that rely on random addresses are enabled on the task.
49282+ In addition to sanitizing this information and disabling other
49283+ dangerous sources of information, this option causes reads of sensitive
49284+ /proc/<pid> entries where the file descriptor was opened in a different
49285+ task than the one performing the read. Such attempts are logged.
49286+ This option also limits argv/env strings for suid/sgid binaries
49287+ to 512KB to prevent a complete exhaustion of the stack entropy provided
49288+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
49289+ binaries to prevent alternative mmap layouts from being abused.
49290+
49291+ If you use PaX it is essential that you say Y here as it closes up
49292+ several holes that make full ASLR useless locally.
49293+
49294+config GRKERNSEC_BRUTE
49295+ bool "Deter exploit bruteforcing"
49296+ help
49297+ If you say Y here, attempts to bruteforce exploits against forking
49298+ daemons such as apache or sshd, as well as against suid/sgid binaries
49299+ will be deterred. When a child of a forking daemon is killed by PaX
49300+ or crashes due to an illegal instruction or other suspicious signal,
49301+ the parent process will be delayed 30 seconds upon every subsequent
49302+ fork until the administrator is able to assess the situation and
49303+ restart the daemon.
49304+ In the suid/sgid case, the attempt is logged, the user has all their
49305+ processes terminated, and they are prevented from executing any further
49306+ processes for 15 minutes.
49307+ It is recommended that you also enable signal logging in the auditing
49308+ section so that logs are generated when a process triggers a suspicious
49309+ signal.
49310+ If the sysctl option is enabled, a sysctl option with name
49311+ "deter_bruteforce" is created.
49312+
49313+
49314+config GRKERNSEC_MODHARDEN
49315+ bool "Harden module auto-loading"
49316+ depends on MODULES
49317+ help
49318+ If you say Y here, module auto-loading in response to use of some
49319+ feature implemented by an unloaded module will be restricted to
49320+ root users. Enabling this option helps defend against attacks
49321+ by unprivileged users who abuse the auto-loading behavior to
49322+ cause a vulnerable module to load that is then exploited.
49323+
49324+ If this option prevents a legitimate use of auto-loading for a
49325+ non-root user, the administrator can execute modprobe manually
49326+ with the exact name of the module mentioned in the alert log.
49327+ Alternatively, the administrator can add the module to the list
49328+ of modules loaded at boot by modifying init scripts.
49329+
49330+ Modification of init scripts will most likely be needed on
49331+ Ubuntu servers with encrypted home directory support enabled,
49332+ as the first non-root user logging in will cause the ecb(aes),
49333+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
49334+
49335+config GRKERNSEC_HIDESYM
49336+ bool "Hide kernel symbols"
49337+ help
49338+ If you say Y here, getting information on loaded modules, and
49339+ displaying all kernel symbols through a syscall will be restricted
49340+ to users with CAP_SYS_MODULE. For software compatibility reasons,
49341+ /proc/kallsyms will be restricted to the root user. The RBAC
49342+ system can hide that entry even from root.
49343+
49344+ This option also prevents leaking of kernel addresses through
49345+ several /proc entries.
49346+
49347+ Note that this option is only effective provided the following
49348+ conditions are met:
49349+ 1) The kernel using grsecurity is not precompiled by some distribution
49350+ 2) You have also enabled GRKERNSEC_DMESG
49351+ 3) You are using the RBAC system and hiding other files such as your
49352+ kernel image and System.map. Alternatively, enabling this option
49353+ causes the permissions on /boot, /lib/modules, and the kernel
49354+ source directory to change at compile time to prevent
49355+ reading by non-root users.
49356+ If the above conditions are met, this option will aid in providing a
49357+ useful protection against local kernel exploitation of overflows
49358+ and arbitrary read/write vulnerabilities.
49359+
49360+config GRKERNSEC_KERN_LOCKOUT
49361+ bool "Active kernel exploit response"
49362+ depends on X86 || ARM || PPC || SPARC
49363+ help
49364+ If you say Y here, when a PaX alert is triggered due to suspicious
49365+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
49366+ or an OOPs occurs due to bad memory accesses, instead of just
49367+ terminating the offending process (and potentially allowing
49368+ a subsequent exploit from the same user), we will take one of two
49369+ actions:
49370+ If the user was root, we will panic the system
49371+ If the user was non-root, we will log the attempt, terminate
49372+ all processes owned by the user, then prevent them from creating
49373+ any new processes until the system is restarted
49374+ This deters repeated kernel exploitation/bruteforcing attempts
49375+ and is useful for later forensics.
49376+
49377+endmenu
49378+menu "Role Based Access Control Options"
49379+depends on GRKERNSEC
49380+
49381+config GRKERNSEC_RBAC_DEBUG
49382+ bool
49383+
49384+config GRKERNSEC_NO_RBAC
49385+ bool "Disable RBAC system"
49386+ help
49387+ If you say Y here, the /dev/grsec device will be removed from the kernel,
49388+ preventing the RBAC system from being enabled. You should only say Y
49389+ here if you have no intention of using the RBAC system, so as to prevent
49390+ an attacker with root access from misusing the RBAC system to hide files
49391+ and processes when loadable module support and /dev/[k]mem have been
49392+ locked down.
49393+
49394+config GRKERNSEC_ACL_HIDEKERN
49395+ bool "Hide kernel processes"
49396+ help
49397+ If you say Y here, all kernel threads will be hidden to all
49398+ processes but those whose subject has the "view hidden processes"
49399+ flag.
49400+
49401+config GRKERNSEC_ACL_MAXTRIES
49402+ int "Maximum tries before password lockout"
49403+ default 3
49404+ help
49405+ This option enforces the maximum number of times a user can attempt
49406+ to authorize themselves with the grsecurity RBAC system before being
49407+ denied the ability to attempt authorization again for a specified time.
49408+ The lower the number, the harder it will be to brute-force a password.
49409+
49410+config GRKERNSEC_ACL_TIMEOUT
49411+ int "Time to wait after max password tries, in seconds"
49412+ default 30
49413+ help
49414+ This option specifies the time the user must wait after attempting to
49415+ authorize to the RBAC system with the maximum number of invalid
49416+ passwords. The higher the number, the harder it will be to brute-force
49417+ a password.
49418+
49419+endmenu
49420+menu "Filesystem Protections"
49421+depends on GRKERNSEC
49422+
49423+config GRKERNSEC_PROC
49424+ bool "Proc restrictions"
49425+ help
49426+ If you say Y here, the permissions of the /proc filesystem
49427+ will be altered to enhance system security and privacy. You MUST
49428+ choose either a user only restriction or a user and group restriction.
49429+ Depending upon the option you choose, you can either restrict users to
49430+ see only the processes they themselves run, or choose a group that can
49431+ view all processes and files normally restricted to root if you choose
49432+ the "restrict to user only" option. NOTE: If you're running identd or
49433+ ntpd as a non-root user, you will have to run it as the group you
49434+ specify here.
49435+
49436+config GRKERNSEC_PROC_USER
49437+ bool "Restrict /proc to user only"
49438+ depends on GRKERNSEC_PROC
49439+ help
49440+ If you say Y here, non-root users will only be able to view their own
49441+ processes, and restricts them from viewing network-related information,
49442+ and viewing kernel symbol and module information.
49443+
49444+config GRKERNSEC_PROC_USERGROUP
49445+ bool "Allow special group"
49446+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
49447+ help
49448+ If you say Y here, you will be able to select a group that will be
49449+ able to view all processes and network-related information. If you've
49450+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
49451+ remain hidden. This option is useful if you want to run identd as
49452+ a non-root user.
49453+
49454+config GRKERNSEC_PROC_GID
49455+ int "GID for special group"
49456+ depends on GRKERNSEC_PROC_USERGROUP
49457+ default 1001
49458+
49459+config GRKERNSEC_PROC_ADD
49460+ bool "Additional restrictions"
49461+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
49462+ help
49463+ If you say Y here, additional restrictions will be placed on
49464+ /proc that keep normal users from viewing device information and
49465+ slabinfo information that could be useful for exploits.
49466+
49467+config GRKERNSEC_LINK
49468+ bool "Linking restrictions"
49469+ help
49470+ If you say Y here, /tmp race exploits will be prevented, since users
49471+ will no longer be able to follow symlinks owned by other users in
49472+ world-writable +t directories (e.g. /tmp), unless the owner of the
49473+ symlink is the owner of the directory. Users will also not be
49474+ able to hardlink to files they do not own. If the sysctl option is
49475+ enabled, a sysctl option with name "linking_restrictions" is created.
49476+
49477+config GRKERNSEC_FIFO
49478+ bool "FIFO restrictions"
49479+ help
49480+ If you say Y here, users will not be able to write to FIFOs they don't
49481+ own in world-writable +t directories (e.g. /tmp), unless the owner of
49482+ the FIFO is the same owner of the directory it's held in. If the sysctl
49483+ option is enabled, a sysctl option with name "fifo_restrictions" is
49484+ created.
49485+
49486+config GRKERNSEC_SYSFS_RESTRICT
49487+ bool "Sysfs/debugfs restriction"
49488+ depends on SYSFS
49489+ help
49490+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
49491+ any filesystem normally mounted under it (e.g. debugfs) will be
49492+ mostly accessible only by root. These filesystems generally provide access
49493+ to hardware and debug information that isn't appropriate for unprivileged
49494+ users of the system. Sysfs and debugfs have also become a large source
49495+ of new vulnerabilities, ranging from infoleaks to local compromise.
49496+ There has been very little oversight with an eye toward security involved
49497+ in adding new exporters of information to these filesystems, so their
49498+ use is discouraged.
49499+ For reasons of compatibility, a few directories have been whitelisted
49500+ for access by non-root users:
49501+ /sys/fs/selinux
49502+ /sys/fs/fuse
49503+ /sys/devices/system/cpu
49504+
49505+config GRKERNSEC_ROFS
49506+ bool "Runtime read-only mount protection"
49507+ help
49508+ If you say Y here, a sysctl option with name "romount_protect" will
49509+ be created. By setting this option to 1 at runtime, filesystems
49510+ will be protected in the following ways:
49511+ * No new writable mounts will be allowed
49512+ * Existing read-only mounts won't be able to be remounted read/write
49513+ * Write operations will be denied on all block devices
49514+ This option acts independently of grsec_lock: once it is set to 1,
49515+ it cannot be turned off. Therefore, please be mindful of the resulting
49516+ behavior if this option is enabled in an init script on a read-only
49517+ filesystem. This feature is mainly intended for secure embedded systems.
49518+
49519+config GRKERNSEC_CHROOT
49520+ bool "Chroot jail restrictions"
49521+ help
49522+ If you say Y here, you will be able to choose several options that will
49523+ make breaking out of a chrooted jail much more difficult. If you
49524+ encounter no software incompatibilities with the following options, it
49525+ is recommended that you enable each one.
49526+
49527+config GRKERNSEC_CHROOT_MOUNT
49528+ bool "Deny mounts"
49529+ depends on GRKERNSEC_CHROOT
49530+ help
49531+ If you say Y here, processes inside a chroot will not be able to
49532+ mount or remount filesystems. If the sysctl option is enabled, a
49533+ sysctl option with name "chroot_deny_mount" is created.
49534+
49535+config GRKERNSEC_CHROOT_DOUBLE
49536+ bool "Deny double-chroots"
49537+ depends on GRKERNSEC_CHROOT
49538+ help
49539+ If you say Y here, processes inside a chroot will not be able to chroot
49540+ again outside the chroot. This is a widely used method of breaking
49541+ out of a chroot jail and should not be allowed. If the sysctl
49542+ option is enabled, a sysctl option with name
49543+ "chroot_deny_chroot" is created.
49544+
49545+config GRKERNSEC_CHROOT_PIVOT
49546+ bool "Deny pivot_root in chroot"
49547+ depends on GRKERNSEC_CHROOT
49548+ help
49549+ If you say Y here, processes inside a chroot will not be able to use
49550+ a function called pivot_root() that was introduced in Linux 2.3.41. It
49551+ works similar to chroot in that it changes the root filesystem. This
49552+ function could be misused in a chrooted process to attempt to break out
49553+ of the chroot, and therefore should not be allowed. If the sysctl
49554+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
49555+ created.
49556+
49557+config GRKERNSEC_CHROOT_CHDIR
49558+ bool "Enforce chdir(\"/\") on all chroots"
49559+ depends on GRKERNSEC_CHROOT
49560+ help
49561+ If you say Y here, the current working directory of all newly-chrooted
49562+ applications will be set to the root directory of the chroot.
49563+ The man page on chroot(2) states:
49564+ Note that this call does not change the current working
49565+ directory, so that `.' can be outside the tree rooted at
49566+ `/'. In particular, the super-user can escape from a
49567+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
49568+
49569+ It is recommended that you say Y here, since it's not known to break
49570+ any software. If the sysctl option is enabled, a sysctl option with
49571+ name "chroot_enforce_chdir" is created.
49572+
49573+config GRKERNSEC_CHROOT_CHMOD
49574+ bool "Deny (f)chmod +s"
49575+ depends on GRKERNSEC_CHROOT
49576+ help
49577+ If you say Y here, processes inside a chroot will not be able to chmod
49578+ or fchmod files to make them have suid or sgid bits. This protects
49579+ against another published method of breaking a chroot. If the sysctl
49580+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
49581+ created.
49582+
49583+config GRKERNSEC_CHROOT_FCHDIR
49584+ bool "Deny fchdir out of chroot"
49585+ depends on GRKERNSEC_CHROOT
49586+ help
49587+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
49588+ to a file descriptor of the chrooting process that points to a directory
49589+ outside the filesystem will be stopped. If the sysctl option
49590+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
49591+
49592+config GRKERNSEC_CHROOT_MKNOD
49593+ bool "Deny mknod"
49594+ depends on GRKERNSEC_CHROOT
49595+ help
49596+ If you say Y here, processes inside a chroot will not be allowed to
49597+ mknod. The problem with using mknod inside a chroot is that it
49598+ would allow an attacker to create a device entry that is the same
49599+ as one on the physical root of your system, which could range from
49600+ anything from the console device to a device for your harddrive (which
49601+ they could then use to wipe the drive or steal data). It is recommended
49602+ that you say Y here, unless you run into software incompatibilities.
49603+ If the sysctl option is enabled, a sysctl option with name
49604+ "chroot_deny_mknod" is created.
49605+
49606+config GRKERNSEC_CHROOT_SHMAT
49607+ bool "Deny shmat() out of chroot"
49608+ depends on GRKERNSEC_CHROOT
49609+ help
49610+ If you say Y here, processes inside a chroot will not be able to attach
49611+ to shared memory segments that were created outside of the chroot jail.
49612+ It is recommended that you say Y here. If the sysctl option is enabled,
49613+ a sysctl option with name "chroot_deny_shmat" is created.
49614+
49615+config GRKERNSEC_CHROOT_UNIX
49616+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
49617+ depends on GRKERNSEC_CHROOT
49618+ help
49619+ If you say Y here, processes inside a chroot will not be able to
49620+ connect to abstract (meaning not belonging to a filesystem) Unix
49621+ domain sockets that were bound outside of a chroot. It is recommended
49622+ that you say Y here. If the sysctl option is enabled, a sysctl option
49623+ with name "chroot_deny_unix" is created.
49624+
49625+config GRKERNSEC_CHROOT_FINDTASK
49626+ bool "Protect outside processes"
49627+ depends on GRKERNSEC_CHROOT
49628+ help
49629+ If you say Y here, processes inside a chroot will not be able to
49630+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
49631+ getsid, or view any process outside of the chroot. If the sysctl
49632+ option is enabled, a sysctl option with name "chroot_findtask" is
49633+ created.
49634+
49635+config GRKERNSEC_CHROOT_NICE
49636+ bool "Restrict priority changes"
49637+ depends on GRKERNSEC_CHROOT
49638+ help
49639+ If you say Y here, processes inside a chroot will not be able to raise
49640+ the priority of processes in the chroot, or alter the priority of
49641+ processes outside the chroot. This provides more security than simply
49642+ removing CAP_SYS_NICE from the process' capability set. If the
49643+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
49644+ is created.
49645+
49646+config GRKERNSEC_CHROOT_SYSCTL
49647+ bool "Deny sysctl writes"
49648+ depends on GRKERNSEC_CHROOT
49649+ help
49650+ If you say Y here, an attacker in a chroot will not be able to
49651+ write to sysctl entries, either by sysctl(2) or through a /proc
49652+ interface. It is strongly recommended that you say Y here. If the
49653+ sysctl option is enabled, a sysctl option with name
49654+ "chroot_deny_sysctl" is created.
49655+
49656+config GRKERNSEC_CHROOT_CAPS
49657+ bool "Capability restrictions"
49658+ depends on GRKERNSEC_CHROOT
49659+ help
49660+ If you say Y here, the capabilities on all processes within a
49661+ chroot jail will be lowered to stop module insertion, raw i/o,
49662+ system and net admin tasks, rebooting the system, modifying immutable
49663+ files, modifying IPC owned by another, and changing the system time.
49664+ This is left an option because it can break some apps. Disable this
49665+ if your chrooted apps are having problems performing those kinds of
49666+ tasks. If the sysctl option is enabled, a sysctl option with
49667+ name "chroot_caps" is created.
49668+
49669+endmenu
49670+menu "Kernel Auditing"
49671+depends on GRKERNSEC
49672+
49673+config GRKERNSEC_AUDIT_GROUP
49674+ bool "Single group for auditing"
49675+ help
49676+ If you say Y here, the exec, chdir, and (un)mount logging features
49677+ will only operate on a group you specify. This option is recommended
49678+ if you only want to watch certain users instead of having a large
49679+ amount of logs from the entire system. If the sysctl option is enabled,
49680+ a sysctl option with name "audit_group" is created.
49681+
49682+config GRKERNSEC_AUDIT_GID
49683+ int "GID for auditing"
49684+ depends on GRKERNSEC_AUDIT_GROUP
49685+ default 1007
49686+
49687+config GRKERNSEC_EXECLOG
49688+ bool "Exec logging"
49689+ help
49690+ If you say Y here, all execve() calls will be logged (since the
49691+ other exec*() calls are frontends to execve(), all execution
49692+ will be logged). Useful for shell-servers that like to keep track
49693+ of their users. If the sysctl option is enabled, a sysctl option with
49694+ name "exec_logging" is created.
49695+ WARNING: This option when enabled will produce a LOT of logs, especially
49696+ on an active system.
49697+
49698+config GRKERNSEC_RESLOG
49699+ bool "Resource logging"
49700+ help
49701+ If you say Y here, all attempts to overstep resource limits will
49702+ be logged with the resource name, the requested size, and the current
49703+ limit. It is highly recommended that you say Y here. If the sysctl
49704+ option is enabled, a sysctl option with name "resource_logging" is
49705+ created. If the RBAC system is enabled, the sysctl value is ignored.
49706+
49707+config GRKERNSEC_CHROOT_EXECLOG
49708+ bool "Log execs within chroot"
49709+ help
49710+ If you say Y here, all executions inside a chroot jail will be logged
49711+ to syslog. This can cause a large amount of logs if certain
49712+ applications (eg. djb's daemontools) are installed on the system, and
49713+ is therefore left as an option. If the sysctl option is enabled, a
49714+ sysctl option with name "chroot_execlog" is created.
49715+
49716+config GRKERNSEC_AUDIT_PTRACE
49717+ bool "Ptrace logging"
49718+ help
49719+ If you say Y here, all attempts to attach to a process via ptrace
49720+ will be logged. If the sysctl option is enabled, a sysctl option
49721+ with name "audit_ptrace" is created.
49722+
49723+config GRKERNSEC_AUDIT_CHDIR
49724+ bool "Chdir logging"
49725+ help
49726+ If you say Y here, all chdir() calls will be logged. If the sysctl
49727+ option is enabled, a sysctl option with name "audit_chdir" is created.
49728+
49729+config GRKERNSEC_AUDIT_MOUNT
49730+ bool "(Un)Mount logging"
49731+ help
49732+ If you say Y here, all mounts and unmounts will be logged. If the
49733+ sysctl option is enabled, a sysctl option with name "audit_mount" is
49734+ created.
49735+
49736+config GRKERNSEC_SIGNAL
49737+ bool "Signal logging"
49738+ help
49739+ If you say Y here, certain important signals will be logged, such as
49740+ SIGSEGV, which will as a result inform you of when an error in a program
49741+ occurred, which in some cases could mean a possible exploit attempt.
49742+ If the sysctl option is enabled, a sysctl option with name
49743+ "signal_logging" is created.
49744+
49745+config GRKERNSEC_FORKFAIL
49746+ bool "Fork failure logging"
49747+ help
49748+ If you say Y here, all failed fork() attempts will be logged.
49749+ This could suggest a fork bomb, or someone attempting to overstep
49750+ their process limit. If the sysctl option is enabled, a sysctl option
49751+ with name "forkfail_logging" is created.
49752+
49753+config GRKERNSEC_TIME
49754+ bool "Time change logging"
49755+ help
49756+ If you say Y here, any changes of the system clock will be logged.
49757+ If the sysctl option is enabled, a sysctl option with name
49758+ "timechange_logging" is created.
49759+
49760+config GRKERNSEC_PROC_IPADDR
49761+ bool "/proc/<pid>/ipaddr support"
49762+ help
49763+ If you say Y here, a new entry will be added to each /proc/<pid>
49764+ directory that contains the IP address of the person using the task.
49765+ The IP is carried across local TCP and AF_UNIX stream sockets.
49766+ This information can be useful for IDS/IPSes to perform remote response
49767+ to a local attack. The entry is readable by only the owner of the
49768+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
49769+ the RBAC system), and thus does not create privacy concerns.
49770+
49771+config GRKERNSEC_RWXMAP_LOG
49772+ bool 'Denied RWX mmap/mprotect logging'
49773+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
49774+ help
49775+ If you say Y here, calls to mmap() and mprotect() with explicit
49776+ usage of PROT_WRITE and PROT_EXEC together will be logged when
49777+ denied by the PAX_MPROTECT feature. If the sysctl option is
49778+ enabled, a sysctl option with name "rwxmap_logging" is created.
49779+
49780+config GRKERNSEC_AUDIT_TEXTREL
49781+ bool 'ELF text relocations logging (READ HELP)'
49782+ depends on PAX_MPROTECT
49783+ help
49784+ If you say Y here, text relocations will be logged with the filename
49785+ of the offending library or binary. The purpose of the feature is
49786+ to help Linux distribution developers get rid of libraries and
49787+ binaries that need text relocations which hinder the future progress
49788+ of PaX. Only Linux distribution developers should say Y here, and
49789+ never on a production machine, as this option creates an information
49790+ leak that could aid an attacker in defeating the randomization of
49791+ a single memory region. If the sysctl option is enabled, a sysctl
49792+ option with name "audit_textrel" is created.
49793+
49794+endmenu
49795+
49796+menu "Executable Protections"
49797+depends on GRKERNSEC
49798+
49799+config GRKERNSEC_DMESG
49800+ bool "Dmesg(8) restriction"
49801+ help
49802+ If you say Y here, non-root users will not be able to use dmesg(8)
49803+ to view up to the last 4kb of messages in the kernel's log buffer.
49804+ The kernel's log buffer often contains kernel addresses and other
49805+ identifying information useful to an attacker in fingerprinting a
49806+ system for a targeted exploit.
49807+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
49808+ created.
49809+
49810+config GRKERNSEC_HARDEN_PTRACE
49811+ bool "Deter ptrace-based process snooping"
49812+ help
49813+ If you say Y here, TTY sniffers and other malicious monitoring
49814+ programs implemented through ptrace will be defeated. If you
49815+ have been using the RBAC system, this option has already been
49816+ enabled for several years for all users, with the ability to make
49817+ fine-grained exceptions.
49818+
49819+ This option only affects the ability of non-root users to ptrace
49820+ processes that are not a descendent of the ptracing process.
49821+ This means that strace ./binary and gdb ./binary will still work,
49822+ but attaching to arbitrary processes will not. If the sysctl
49823+ option is enabled, a sysctl option with name "harden_ptrace" is
49824+ created.
49825+
49826+config GRKERNSEC_PTRACE_READEXEC
49827+ bool "Require read access to ptrace sensitive binaries"
49828+ help
49829+ If you say Y here, unprivileged users will not be able to ptrace unreadable
49830+ binaries. This option is useful in environments that
49831+ remove the read bits (e.g. file mode 4711) from suid binaries to
49832+ prevent infoleaking of their contents. This option adds
49833+ consistency to the use of that file mode, as the binary could normally
49834+ be read out when run without privileges while ptracing.
49835+
49836+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
49837+ is created.
49838+
49839+config GRKERNSEC_SETXID
49840+ bool "Enforce consistent multithreaded privileges"
49841+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
49842+ help
49843+ If you say Y here, a change from a root uid to a non-root uid
49844+ in a multithreaded application will cause the resulting uids,
49845+ gids, supplementary groups, and capabilities in that thread
49846+ to be propagated to the other threads of the process. In most
49847+ cases this is unnecessary, as glibc will emulate this behavior
49848+ on behalf of the application. Other libcs do not act in the
49849+ same way, allowing the other threads of the process to continue
49850+ running with root privileges. If the sysctl option is enabled,
49851+ a sysctl option with name "consistent_setxid" is created.
49852+
49853+config GRKERNSEC_TPE
49854+ bool "Trusted Path Execution (TPE)"
49855+ help
49856+ If you say Y here, you will be able to choose a gid to add to the
49857+ supplementary groups of users you want to mark as "untrusted."
49858+ These users will not be able to execute any files that are not in
49859+ root-owned directories writable only by root. If the sysctl option
49860+ is enabled, a sysctl option with name "tpe" is created.
49861+
49862+config GRKERNSEC_TPE_ALL
49863+ bool "Partially restrict all non-root users"
49864+ depends on GRKERNSEC_TPE
49865+ help
49866+ If you say Y here, all non-root users will be covered under
49867+ a weaker TPE restriction. This is separate from, and in addition to,
49868+ the main TPE options that you have selected elsewhere. Thus, if a
49869+ "trusted" GID is chosen, this restriction applies to even that GID.
49870+ Under this restriction, all non-root users will only be allowed to
49871+ execute files in directories they own that are not group or
49872+ world-writable, or in directories owned by root and writable only by
49873+ root. If the sysctl option is enabled, a sysctl option with name
49874+ "tpe_restrict_all" is created.
49875+
49876+config GRKERNSEC_TPE_INVERT
49877+ bool "Invert GID option"
49878+ depends on GRKERNSEC_TPE
49879+ help
49880+ If you say Y here, the group you specify in the TPE configuration will
49881+ decide what group TPE restrictions will be *disabled* for. This
49882+ option is useful if you want TPE restrictions to be applied to most
49883+ users on the system. If the sysctl option is enabled, a sysctl option
49884+ with name "tpe_invert" is created. Unlike other sysctl options, this
49885+ entry will default to on for backward-compatibility.
49886+
49887+config GRKERNSEC_TPE_GID
49888+ int "GID for untrusted users"
49889+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
49890+ default 1005
49891+ help
49892+ Setting this GID determines what group TPE restrictions will be
49893+ *enabled* for. If the sysctl option is enabled, a sysctl option
49894+ with name "tpe_gid" is created.
49895+
49896+config GRKERNSEC_TPE_GID
49897+ int "GID for trusted users"
49898+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
49899+ default 1005
49900+ help
49901+ Setting this GID determines what group TPE restrictions will be
49902+ *disabled* for. If the sysctl option is enabled, a sysctl option
49903+ with name "tpe_gid" is created.
49904+
49905+endmenu
49906+menu "Network Protections"
49907+depends on GRKERNSEC
49908+
49909+config GRKERNSEC_RANDNET
49910+ bool "Larger entropy pools"
49911+ help
49912+ If you say Y here, the entropy pools used for many features of Linux
49913+ and grsecurity will be doubled in size. Since several grsecurity
49914+ features use additional randomness, it is recommended that you say Y
49915+ here. Saying Y here has a similar effect as modifying
49916+ /proc/sys/kernel/random/poolsize.
49917+
49918+config GRKERNSEC_BLACKHOLE
49919+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
49920+ depends on NET
49921+ help
49922+ If you say Y here, neither TCP resets nor ICMP
49923+ destination-unreachable packets will be sent in response to packets
49924+ sent to ports for which no associated listening process exists.
49925+ This feature supports both IPV4 and IPV6 and exempts the
49926+ loopback interface from blackholing. Enabling this feature
49927+ makes a host more resilient to DoS attacks and reduces network
49928+ visibility against scanners.
49929+
49930+ The blackhole feature as-implemented is equivalent to the FreeBSD
49931+ blackhole feature, as it prevents RST responses to all packets, not
49932+ just SYNs. Under most application behavior this causes no
49933+ problems, but applications (like haproxy) may not close certain
49934+ connections in a way that cleanly terminates them on the remote
49935+ end, leaving the remote host in LAST_ACK state. Because of this
49936+ side-effect and to prevent intentional LAST_ACK DoSes, this
49937+ feature also adds automatic mitigation against such attacks.
49938+ The mitigation drastically reduces the amount of time a socket
49939+ can spend in LAST_ACK state. If you're using haproxy and not
49940+ all servers it connects to have this option enabled, consider
49941+ disabling this feature on the haproxy host.
49942+
49943+ If the sysctl option is enabled, two sysctl options with names
49944+ "ip_blackhole" and "lastack_retries" will be created.
49945+ While "ip_blackhole" takes the standard zero/non-zero on/off
49946+ toggle, "lastack_retries" uses the same kinds of values as
49947+ "tcp_retries1" and "tcp_retries2". The default value of 4
49948+ prevents a socket from lasting more than 45 seconds in LAST_ACK
49949+ state.
49950+
49951+config GRKERNSEC_SOCKET
49952+ bool "Socket restrictions"
49953+ depends on NET
49954+ help
49955+ If you say Y here, you will be able to choose from several options.
49956+ If you assign a GID on your system and add it to the supplementary
49957+ groups of users you want to restrict socket access to, this patch
49958+ will perform up to three things, based on the option(s) you choose.
49959+
49960+config GRKERNSEC_SOCKET_ALL
49961+ bool "Deny any sockets to group"
49962+ depends on GRKERNSEC_SOCKET
49963+ help
49964+ If you say Y here, you will be able to choose a GID of whose users will
49965+ be unable to connect to other hosts from your machine or run server
49966+ applications from your machine. If the sysctl option is enabled, a
49967+ sysctl option with name "socket_all" is created.
49968+
49969+config GRKERNSEC_SOCKET_ALL_GID
49970+ int "GID to deny all sockets for"
49971+ depends on GRKERNSEC_SOCKET_ALL
49972+ default 1004
49973+ help
49974+ Here you can choose the GID to disable socket access for. Remember to
49975+ add the users you want socket access disabled for to the GID
49976+ specified here. If the sysctl option is enabled, a sysctl option
49977+ with name "socket_all_gid" is created.
49978+
49979+config GRKERNSEC_SOCKET_CLIENT
49980+ bool "Deny client sockets to group"
49981+ depends on GRKERNSEC_SOCKET
49982+ help
49983+ If you say Y here, you will be able to choose a GID of whose users will
49984+ be unable to connect to other hosts from your machine, but will be
49985+ able to run servers. If this option is enabled, all users in the group
49986+ you specify will have to use passive mode when initiating ftp transfers
49987+ from the shell on your machine. If the sysctl option is enabled, a
49988+ sysctl option with name "socket_client" is created.
49989+
49990+config GRKERNSEC_SOCKET_CLIENT_GID
49991+ int "GID to deny client sockets for"
49992+ depends on GRKERNSEC_SOCKET_CLIENT
49993+ default 1003
49994+ help
49995+ Here you can choose the GID to disable client socket access for.
49996+ Remember to add the users you want client socket access disabled for to
49997+ the GID specified here. If the sysctl option is enabled, a sysctl
49998+ option with name "socket_client_gid" is created.
49999+
50000+config GRKERNSEC_SOCKET_SERVER
50001+ bool "Deny server sockets to group"
50002+ depends on GRKERNSEC_SOCKET
50003+ help
50004+ If you say Y here, you will be able to choose a GID of whose users will
50005+ be unable to run server applications from your machine. If the sysctl
50006+ option is enabled, a sysctl option with name "socket_server" is created.
50007+
50008+config GRKERNSEC_SOCKET_SERVER_GID
50009+ int "GID to deny server sockets for"
50010+ depends on GRKERNSEC_SOCKET_SERVER
50011+ default 1002
50012+ help
50013+ Here you can choose the GID to disable server socket access for.
50014+ Remember to add the users you want server socket access disabled for to
50015+ the GID specified here. If the sysctl option is enabled, a sysctl
50016+ option with name "socket_server_gid" is created.
50017+
50018+endmenu
50019+menu "Sysctl support"
50020+depends on GRKERNSEC && SYSCTL
50021+
50022+config GRKERNSEC_SYSCTL
50023+ bool "Sysctl support"
50024+ help
50025+ If you say Y here, you will be able to change the options that
50026+ grsecurity runs with at bootup, without having to recompile your
50027+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
50028+ to enable (1) or disable (0) various features. All the sysctl entries
50029+ are mutable until the "grsec_lock" entry is set to a non-zero value.
50030+ All features enabled in the kernel configuration are disabled at boot
50031+ if you do not say Y to the "Turn on features by default" option.
50032+ All options should be set at startup, and the grsec_lock entry should
50033+ be set to a non-zero value after all the options are set.
50034+ *THIS IS EXTREMELY IMPORTANT*
50035+
50036+config GRKERNSEC_SYSCTL_DISTRO
50037+ bool "Extra sysctl support for distro makers (READ HELP)"
50038+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
50039+ help
50040+ If you say Y here, additional sysctl options will be created
50041+ for features that affect processes running as root. Therefore,
50042+ it is critical when using this option that the grsec_lock entry be
50043+ enabled after boot. Only distros with prebuilt kernel packages
50044+ with this option enabled that can ensure grsec_lock is enabled
50045+ after boot should use this option.
50046+ *Failure to set grsec_lock after boot makes all grsec features
50047+ this option covers useless*
50048+
50049+ Currently this option creates the following sysctl entries:
50050+ "Disable Privileged I/O": "disable_priv_io"
50051+
50052+config GRKERNSEC_SYSCTL_ON
50053+ bool "Turn on features by default"
50054+ depends on GRKERNSEC_SYSCTL
50055+ help
50056+ If you say Y here, instead of having all features enabled in the
50057+ kernel configuration disabled at boot time, the features will be
50058+ enabled at boot time. It is recommended you say Y here unless
50059+ there is some reason you would want all sysctl-tunable features to
50060+ be disabled by default. As mentioned elsewhere, it is important
50061+ to enable the grsec_lock entry once you have finished modifying
50062+ the sysctl entries.
50063+
50064+endmenu
50065+menu "Logging Options"
50066+depends on GRKERNSEC
50067+
50068+config GRKERNSEC_FLOODTIME
50069+ int "Seconds in between log messages (minimum)"
50070+ default 10
50071+ help
50072+ This option allows you to enforce the number of seconds between
50073+ grsecurity log messages. The default should be suitable for most
50074+ people, however, if you choose to change it, choose a value small enough
50075+ to allow informative logs to be produced, but large enough to
50076+ prevent flooding.
50077+
50078+config GRKERNSEC_FLOODBURST
50079+ int "Number of messages in a burst (maximum)"
50080+ default 6
50081+ help
50082+ This option allows you to choose the maximum number of messages allowed
50083+ within the flood time interval you chose in a separate option. The
50084+ default should be suitable for most people, however if you find that
50085+ many of your logs are being interpreted as flooding, you may want to
50086+ raise this value.
50087+
50088+endmenu
50089+
50090+endmenu
50091diff --git a/grsecurity/Makefile b/grsecurity/Makefile
50092new file mode 100644
50093index 0000000..1b9afa9
50094--- /dev/null
50095+++ b/grsecurity/Makefile
50096@@ -0,0 +1,38 @@
50097+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50098+# during 2001-2009 it has been completely redesigned by Brad Spengler
50099+# into an RBAC system
50100+#
50101+# All code in this directory and various hooks inserted throughout the kernel
50102+# are copyright Brad Spengler - Open Source Security, Inc., and released
50103+# under the GPL v2 or higher
50104+
50105+KBUILD_CFLAGS += -Werror
50106+
50107+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50108+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
50109+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
50110+
50111+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50112+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50113+ gracl_learn.o grsec_log.o
50114+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
50115+
50116+ifdef CONFIG_NET
50117+obj-y += grsec_sock.o
50118+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50119+endif
50120+
50121+ifndef CONFIG_GRKERNSEC
50122+obj-y += grsec_disabled.o
50123+endif
50124+
50125+ifdef CONFIG_GRKERNSEC_HIDESYM
50126+extra-y := grsec_hidesym.o
50127+$(obj)/grsec_hidesym.o:
50128+ @-chmod -f 500 /boot
50129+ @-chmod -f 500 /lib/modules
50130+ @-chmod -f 500 /lib64/modules
50131+ @-chmod -f 500 /lib32/modules
50132+ @-chmod -f 700 .
50133+ @echo ' grsec: protected kernel image paths'
50134+endif
50135diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
50136new file mode 100644
50137index 0000000..00b6c54
50138--- /dev/null
50139+++ b/grsecurity/gracl.c
50140@@ -0,0 +1,4012 @@
50141+#include <linux/kernel.h>
50142+#include <linux/module.h>
50143+#include <linux/sched.h>
50144+#include <linux/mm.h>
50145+#include <linux/file.h>
50146+#include <linux/fs.h>
50147+#include <linux/namei.h>
50148+#include <linux/mount.h>
50149+#include <linux/tty.h>
50150+#include <linux/proc_fs.h>
50151+#include <linux/lglock.h>
50152+#include <linux/slab.h>
50153+#include <linux/vmalloc.h>
50154+#include <linux/types.h>
50155+#include <linux/sysctl.h>
50156+#include <linux/netdevice.h>
50157+#include <linux/ptrace.h>
50158+#include <linux/gracl.h>
50159+#include <linux/gralloc.h>
50160+#include <linux/security.h>
50161+#include <linux/grinternal.h>
50162+#include <linux/pid_namespace.h>
50163+#include <linux/fdtable.h>
50164+#include <linux/percpu.h>
50165+#include "../fs/mount.h"
50166+
50167+#include <asm/uaccess.h>
50168+#include <asm/errno.h>
50169+#include <asm/mman.h>
50170+
50171+static struct acl_role_db acl_role_set;
50172+static struct name_db name_set;
50173+static struct inodev_db inodev_set;
50174+
50175+/* for keeping track of userspace pointers used for subjects, so we
50176+ can share references in the kernel as well
50177+*/
50178+
50179+static struct path real_root;
50180+
50181+static struct acl_subj_map_db subj_map_set;
50182+
50183+static struct acl_role_label *default_role;
50184+
50185+static struct acl_role_label *role_list;
50186+
50187+static u16 acl_sp_role_value;
50188+
50189+extern char *gr_shared_page[4];
50190+static DEFINE_MUTEX(gr_dev_mutex);
50191+DEFINE_RWLOCK(gr_inode_lock);
50192+
50193+struct gr_arg *gr_usermode;
50194+
50195+static unsigned int gr_status __read_only = GR_STATUS_INIT;
50196+
50197+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
50198+extern void gr_clear_learn_entries(void);
50199+
50200+#ifdef CONFIG_GRKERNSEC_RESLOG
50201+extern void gr_log_resource(const struct task_struct *task,
50202+ const int res, const unsigned long wanted, const int gt);
50203+#endif
50204+
50205+unsigned char *gr_system_salt;
50206+unsigned char *gr_system_sum;
50207+
50208+static struct sprole_pw **acl_special_roles = NULL;
50209+static __u16 num_sprole_pws = 0;
50210+
50211+static struct acl_role_label *kernel_role = NULL;
50212+
50213+static unsigned int gr_auth_attempts = 0;
50214+static unsigned long gr_auth_expires = 0UL;
50215+
50216+#ifdef CONFIG_NET
50217+extern struct vfsmount *sock_mnt;
50218+#endif
50219+
50220+extern struct vfsmount *pipe_mnt;
50221+extern struct vfsmount *shm_mnt;
50222+#ifdef CONFIG_HUGETLBFS
50223+extern struct vfsmount *hugetlbfs_vfsmount;
50224+#endif
50225+
50226+static struct acl_object_label *fakefs_obj_rw;
50227+static struct acl_object_label *fakefs_obj_rwx;
50228+
50229+extern int gr_init_uidset(void);
50230+extern void gr_free_uidset(void);
50231+extern void gr_remove_uid(uid_t uid);
50232+extern int gr_find_uid(uid_t uid);
50233+
50234+DECLARE_BRLOCK(vfsmount_lock);
50235+
50236+__inline__ int
50237+gr_acl_is_enabled(void)
50238+{
50239+ return (gr_status & GR_READY);
50240+}
50241+
50242+#ifdef CONFIG_BTRFS_FS
50243+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
50244+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
50245+#endif
50246+
50247+static inline dev_t __get_dev(const struct dentry *dentry)
50248+{
50249+#ifdef CONFIG_BTRFS_FS
50250+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
50251+ return get_btrfs_dev_from_inode(dentry->d_inode);
50252+ else
50253+#endif
50254+ return dentry->d_inode->i_sb->s_dev;
50255+}
50256+
50257+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50258+{
50259+ return __get_dev(dentry);
50260+}
50261+
50262+static char gr_task_roletype_to_char(struct task_struct *task)
50263+{
50264+ switch (task->role->roletype &
50265+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
50266+ GR_ROLE_SPECIAL)) {
50267+ case GR_ROLE_DEFAULT:
50268+ return 'D';
50269+ case GR_ROLE_USER:
50270+ return 'U';
50271+ case GR_ROLE_GROUP:
50272+ return 'G';
50273+ case GR_ROLE_SPECIAL:
50274+ return 'S';
50275+ }
50276+
50277+ return 'X';
50278+}
50279+
50280+char gr_roletype_to_char(void)
50281+{
50282+ return gr_task_roletype_to_char(current);
50283+}
50284+
50285+__inline__ int
50286+gr_acl_tpe_check(void)
50287+{
50288+ if (unlikely(!(gr_status & GR_READY)))
50289+ return 0;
50290+ if (current->role->roletype & GR_ROLE_TPE)
50291+ return 1;
50292+ else
50293+ return 0;
50294+}
50295+
50296+int
50297+gr_handle_rawio(const struct inode *inode)
50298+{
50299+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50300+ if (inode && S_ISBLK(inode->i_mode) &&
50301+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50302+ !capable(CAP_SYS_RAWIO))
50303+ return 1;
50304+#endif
50305+ return 0;
50306+}
50307+
50308+static int
50309+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
50310+{
50311+ if (likely(lena != lenb))
50312+ return 0;
50313+
50314+ return !memcmp(a, b, lena);
50315+}
50316+
50317+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
50318+{
50319+ *buflen -= namelen;
50320+ if (*buflen < 0)
50321+ return -ENAMETOOLONG;
50322+ *buffer -= namelen;
50323+ memcpy(*buffer, str, namelen);
50324+ return 0;
50325+}
50326+
50327+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
50328+{
50329+ return prepend(buffer, buflen, name->name, name->len);
50330+}
50331+
50332+static int prepend_path(const struct path *path, struct path *root,
50333+ char **buffer, int *buflen)
50334+{
50335+ struct dentry *dentry = path->dentry;
50336+ struct vfsmount *vfsmnt = path->mnt;
50337+ struct mount *mnt = real_mount(vfsmnt);
50338+ bool slash = false;
50339+ int error = 0;
50340+
50341+ while (dentry != root->dentry || vfsmnt != root->mnt) {
50342+ struct dentry * parent;
50343+
50344+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
50345+ /* Global root? */
50346+ if (!mnt_has_parent(mnt)) {
50347+ goto out;
50348+ }
50349+ dentry = mnt->mnt_mountpoint;
50350+ mnt = mnt->mnt_parent;
50351+ vfsmnt = &mnt->mnt;
50352+ continue;
50353+ }
50354+ parent = dentry->d_parent;
50355+ prefetch(parent);
50356+ spin_lock(&dentry->d_lock);
50357+ error = prepend_name(buffer, buflen, &dentry->d_name);
50358+ spin_unlock(&dentry->d_lock);
50359+ if (!error)
50360+ error = prepend(buffer, buflen, "/", 1);
50361+ if (error)
50362+ break;
50363+
50364+ slash = true;
50365+ dentry = parent;
50366+ }
50367+
50368+out:
50369+ if (!error && !slash)
50370+ error = prepend(buffer, buflen, "/", 1);
50371+
50372+ return error;
50373+}
50374+
50375+/* this must be called with vfsmount_lock and rename_lock held */
50376+
50377+static char *__our_d_path(const struct path *path, struct path *root,
50378+ char *buf, int buflen)
50379+{
50380+ char *res = buf + buflen;
50381+ int error;
50382+
50383+ prepend(&res, &buflen, "\0", 1);
50384+ error = prepend_path(path, root, &res, &buflen);
50385+ if (error)
50386+ return ERR_PTR(error);
50387+
50388+ return res;
50389+}
50390+
50391+static char *
50392+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
50393+{
50394+ char *retval;
50395+
50396+ retval = __our_d_path(path, root, buf, buflen);
50397+ if (unlikely(IS_ERR(retval)))
50398+ retval = strcpy(buf, "<path too long>");
50399+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
50400+ retval[1] = '\0';
50401+
50402+ return retval;
50403+}
50404+
50405+static char *
50406+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50407+ char *buf, int buflen)
50408+{
50409+ struct path path;
50410+ char *res;
50411+
50412+ path.dentry = (struct dentry *)dentry;
50413+ path.mnt = (struct vfsmount *)vfsmnt;
50414+
50415+ /* we can use real_root.dentry, real_root.mnt, because this is only called
50416+ by the RBAC system */
50417+ res = gen_full_path(&path, &real_root, buf, buflen);
50418+
50419+ return res;
50420+}
50421+
50422+static char *
50423+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50424+ char *buf, int buflen)
50425+{
50426+ char *res;
50427+ struct path path;
50428+ struct path root;
50429+ struct task_struct *reaper = init_pid_ns.child_reaper;
50430+
50431+ path.dentry = (struct dentry *)dentry;
50432+ path.mnt = (struct vfsmount *)vfsmnt;
50433+
50434+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
50435+ get_fs_root(reaper->fs, &root);
50436+
50437+ write_seqlock(&rename_lock);
50438+ br_read_lock(vfsmount_lock);
50439+ res = gen_full_path(&path, &root, buf, buflen);
50440+ br_read_unlock(vfsmount_lock);
50441+ write_sequnlock(&rename_lock);
50442+
50443+ path_put(&root);
50444+ return res;
50445+}
50446+
50447+static char *
50448+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50449+{
50450+ char *ret;
50451+ write_seqlock(&rename_lock);
50452+ br_read_lock(vfsmount_lock);
50453+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50454+ PAGE_SIZE);
50455+ br_read_unlock(vfsmount_lock);
50456+ write_sequnlock(&rename_lock);
50457+ return ret;
50458+}
50459+
50460+static char *
50461+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50462+{
50463+ char *ret;
50464+ char *buf;
50465+ int buflen;
50466+
50467+ write_seqlock(&rename_lock);
50468+ br_read_lock(vfsmount_lock);
50469+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50470+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
50471+ buflen = (int)(ret - buf);
50472+ if (buflen >= 5)
50473+ prepend(&ret, &buflen, "/proc", 5);
50474+ else
50475+ ret = strcpy(buf, "<path too long>");
50476+ br_read_unlock(vfsmount_lock);
50477+ write_sequnlock(&rename_lock);
50478+ return ret;
50479+}
50480+
50481+char *
50482+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
50483+{
50484+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50485+ PAGE_SIZE);
50486+}
50487+
50488+char *
50489+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
50490+{
50491+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50492+ PAGE_SIZE);
50493+}
50494+
50495+char *
50496+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
50497+{
50498+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
50499+ PAGE_SIZE);
50500+}
50501+
50502+char *
50503+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
50504+{
50505+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
50506+ PAGE_SIZE);
50507+}
50508+
50509+char *
50510+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
50511+{
50512+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
50513+ PAGE_SIZE);
50514+}
50515+
50516+__inline__ __u32
50517+to_gr_audit(const __u32 reqmode)
50518+{
50519+ /* masks off auditable permission flags, then shifts them to create
50520+ auditing flags, and adds the special case of append auditing if
50521+ we're requesting write */
50522+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
50523+}
50524+
50525+struct acl_subject_label *
50526+lookup_subject_map(const struct acl_subject_label *userp)
50527+{
50528+ unsigned int index = shash(userp, subj_map_set.s_size);
50529+ struct subject_map *match;
50530+
50531+ match = subj_map_set.s_hash[index];
50532+
50533+ while (match && match->user != userp)
50534+ match = match->next;
50535+
50536+ if (match != NULL)
50537+ return match->kernel;
50538+ else
50539+ return NULL;
50540+}
50541+
50542+static void
50543+insert_subj_map_entry(struct subject_map *subjmap)
50544+{
50545+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
50546+ struct subject_map **curr;
50547+
50548+ subjmap->prev = NULL;
50549+
50550+ curr = &subj_map_set.s_hash[index];
50551+ if (*curr != NULL)
50552+ (*curr)->prev = subjmap;
50553+
50554+ subjmap->next = *curr;
50555+ *curr = subjmap;
50556+
50557+ return;
50558+}
50559+
50560+static struct acl_role_label *
50561+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
50562+ const gid_t gid)
50563+{
50564+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
50565+ struct acl_role_label *match;
50566+ struct role_allowed_ip *ipp;
50567+ unsigned int x;
50568+ u32 curr_ip = task->signal->curr_ip;
50569+
50570+ task->signal->saved_ip = curr_ip;
50571+
50572+ match = acl_role_set.r_hash[index];
50573+
50574+ while (match) {
50575+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
50576+ for (x = 0; x < match->domain_child_num; x++) {
50577+ if (match->domain_children[x] == uid)
50578+ goto found;
50579+ }
50580+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
50581+ break;
50582+ match = match->next;
50583+ }
50584+found:
50585+ if (match == NULL) {
50586+ try_group:
50587+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
50588+ match = acl_role_set.r_hash[index];
50589+
50590+ while (match) {
50591+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
50592+ for (x = 0; x < match->domain_child_num; x++) {
50593+ if (match->domain_children[x] == gid)
50594+ goto found2;
50595+ }
50596+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
50597+ break;
50598+ match = match->next;
50599+ }
50600+found2:
50601+ if (match == NULL)
50602+ match = default_role;
50603+ if (match->allowed_ips == NULL)
50604+ return match;
50605+ else {
50606+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50607+ if (likely
50608+ ((ntohl(curr_ip) & ipp->netmask) ==
50609+ (ntohl(ipp->addr) & ipp->netmask)))
50610+ return match;
50611+ }
50612+ match = default_role;
50613+ }
50614+ } else if (match->allowed_ips == NULL) {
50615+ return match;
50616+ } else {
50617+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50618+ if (likely
50619+ ((ntohl(curr_ip) & ipp->netmask) ==
50620+ (ntohl(ipp->addr) & ipp->netmask)))
50621+ return match;
50622+ }
50623+ goto try_group;
50624+ }
50625+
50626+ return match;
50627+}
50628+
50629+struct acl_subject_label *
50630+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
50631+ const struct acl_role_label *role)
50632+{
50633+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
50634+ struct acl_subject_label *match;
50635+
50636+ match = role->subj_hash[index];
50637+
50638+ while (match && (match->inode != ino || match->device != dev ||
50639+ (match->mode & GR_DELETED))) {
50640+ match = match->next;
50641+ }
50642+
50643+ if (match && !(match->mode & GR_DELETED))
50644+ return match;
50645+ else
50646+ return NULL;
50647+}
50648+
50649+struct acl_subject_label *
50650+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
50651+ const struct acl_role_label *role)
50652+{
50653+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
50654+ struct acl_subject_label *match;
50655+
50656+ match = role->subj_hash[index];
50657+
50658+ while (match && (match->inode != ino || match->device != dev ||
50659+ !(match->mode & GR_DELETED))) {
50660+ match = match->next;
50661+ }
50662+
50663+ if (match && (match->mode & GR_DELETED))
50664+ return match;
50665+ else
50666+ return NULL;
50667+}
50668+
50669+static struct acl_object_label *
50670+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
50671+ const struct acl_subject_label *subj)
50672+{
50673+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
50674+ struct acl_object_label *match;
50675+
50676+ match = subj->obj_hash[index];
50677+
50678+ while (match && (match->inode != ino || match->device != dev ||
50679+ (match->mode & GR_DELETED))) {
50680+ match = match->next;
50681+ }
50682+
50683+ if (match && !(match->mode & GR_DELETED))
50684+ return match;
50685+ else
50686+ return NULL;
50687+}
50688+
50689+static struct acl_object_label *
50690+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
50691+ const struct acl_subject_label *subj)
50692+{
50693+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
50694+ struct acl_object_label *match;
50695+
50696+ match = subj->obj_hash[index];
50697+
50698+ while (match && (match->inode != ino || match->device != dev ||
50699+ !(match->mode & GR_DELETED))) {
50700+ match = match->next;
50701+ }
50702+
50703+ if (match && (match->mode & GR_DELETED))
50704+ return match;
50705+
50706+ match = subj->obj_hash[index];
50707+
50708+ while (match && (match->inode != ino || match->device != dev ||
50709+ (match->mode & GR_DELETED))) {
50710+ match = match->next;
50711+ }
50712+
50713+ if (match && !(match->mode & GR_DELETED))
50714+ return match;
50715+ else
50716+ return NULL;
50717+}
50718+
50719+static struct name_entry *
50720+lookup_name_entry(const char *name)
50721+{
50722+ unsigned int len = strlen(name);
50723+ unsigned int key = full_name_hash(name, len);
50724+ unsigned int index = key % name_set.n_size;
50725+ struct name_entry *match;
50726+
50727+ match = name_set.n_hash[index];
50728+
50729+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
50730+ match = match->next;
50731+
50732+ return match;
50733+}
50734+
50735+static struct name_entry *
50736+lookup_name_entry_create(const char *name)
50737+{
50738+ unsigned int len = strlen(name);
50739+ unsigned int key = full_name_hash(name, len);
50740+ unsigned int index = key % name_set.n_size;
50741+ struct name_entry *match;
50742+
50743+ match = name_set.n_hash[index];
50744+
50745+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
50746+ !match->deleted))
50747+ match = match->next;
50748+
50749+ if (match && match->deleted)
50750+ return match;
50751+
50752+ match = name_set.n_hash[index];
50753+
50754+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
50755+ match->deleted))
50756+ match = match->next;
50757+
50758+ if (match && !match->deleted)
50759+ return match;
50760+ else
50761+ return NULL;
50762+}
50763+
50764+static struct inodev_entry *
50765+lookup_inodev_entry(const ino_t ino, const dev_t dev)
50766+{
50767+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
50768+ struct inodev_entry *match;
50769+
50770+ match = inodev_set.i_hash[index];
50771+
50772+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
50773+ match = match->next;
50774+
50775+ return match;
50776+}
50777+
50778+static void
50779+insert_inodev_entry(struct inodev_entry *entry)
50780+{
50781+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
50782+ inodev_set.i_size);
50783+ struct inodev_entry **curr;
50784+
50785+ entry->prev = NULL;
50786+
50787+ curr = &inodev_set.i_hash[index];
50788+ if (*curr != NULL)
50789+ (*curr)->prev = entry;
50790+
50791+ entry->next = *curr;
50792+ *curr = entry;
50793+
50794+ return;
50795+}
50796+
50797+static void
50798+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
50799+{
50800+ unsigned int index =
50801+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
50802+ struct acl_role_label **curr;
50803+ struct acl_role_label *tmp, *tmp2;
50804+
50805+ curr = &acl_role_set.r_hash[index];
50806+
50807+ /* simple case, slot is empty, just set it to our role */
50808+ if (*curr == NULL) {
50809+ *curr = role;
50810+ } else {
50811+ /* example:
50812+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
50813+ 2 -> 3
50814+ */
50815+ /* first check to see if we can already be reached via this slot */
50816+ tmp = *curr;
50817+ while (tmp && tmp != role)
50818+ tmp = tmp->next;
50819+ if (tmp == role) {
50820+ /* we don't need to add ourselves to this slot's chain */
50821+ return;
50822+ }
50823+ /* we need to add ourselves to this chain, two cases */
50824+ if (role->next == NULL) {
50825+ /* simple case, append the current chain to our role */
50826+ role->next = *curr;
50827+ *curr = role;
50828+ } else {
50829+ /* 1 -> 2 -> 3 -> 4
50830+ 2 -> 3 -> 4
50831+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
50832+ */
50833+ /* trickier case: walk our role's chain until we find
50834+ the role for the start of the current slot's chain */
50835+ tmp = role;
50836+ tmp2 = *curr;
50837+ while (tmp->next && tmp->next != tmp2)
50838+ tmp = tmp->next;
50839+ if (tmp->next == tmp2) {
50840+ /* from example above, we found 3, so just
50841+ replace this slot's chain with ours */
50842+ *curr = role;
50843+ } else {
50844+ /* we didn't find a subset of our role's chain
50845+ in the current slot's chain, so append their
50846+ chain to ours, and set us as the first role in
50847+ the slot's chain
50848+
50849+ we could fold this case with the case above,
50850+ but making it explicit for clarity
50851+ */
50852+ tmp->next = tmp2;
50853+ *curr = role;
50854+ }
50855+ }
50856+ }
50857+
50858+ return;
50859+}
50860+
50861+static void
50862+insert_acl_role_label(struct acl_role_label *role)
50863+{
50864+ int i;
50865+
50866+ if (role_list == NULL) {
50867+ role_list = role;
50868+ role->prev = NULL;
50869+ } else {
50870+ role->prev = role_list;
50871+ role_list = role;
50872+ }
50873+
50874+ /* used for hash chains */
50875+ role->next = NULL;
50876+
50877+ if (role->roletype & GR_ROLE_DOMAIN) {
50878+ for (i = 0; i < role->domain_child_num; i++)
50879+ __insert_acl_role_label(role, role->domain_children[i]);
50880+ } else
50881+ __insert_acl_role_label(role, role->uidgid);
50882+}
50883+
50884+static int
50885+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
50886+{
50887+ struct name_entry **curr, *nentry;
50888+ struct inodev_entry *ientry;
50889+ unsigned int len = strlen(name);
50890+ unsigned int key = full_name_hash(name, len);
50891+ unsigned int index = key % name_set.n_size;
50892+
50893+ curr = &name_set.n_hash[index];
50894+
50895+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
50896+ curr = &((*curr)->next);
50897+
50898+ if (*curr != NULL)
50899+ return 1;
50900+
50901+ nentry = acl_alloc(sizeof (struct name_entry));
50902+ if (nentry == NULL)
50903+ return 0;
50904+ ientry = acl_alloc(sizeof (struct inodev_entry));
50905+ if (ientry == NULL)
50906+ return 0;
50907+ ientry->nentry = nentry;
50908+
50909+ nentry->key = key;
50910+ nentry->name = name;
50911+ nentry->inode = inode;
50912+ nentry->device = device;
50913+ nentry->len = len;
50914+ nentry->deleted = deleted;
50915+
50916+ nentry->prev = NULL;
50917+ curr = &name_set.n_hash[index];
50918+ if (*curr != NULL)
50919+ (*curr)->prev = nentry;
50920+ nentry->next = *curr;
50921+ *curr = nentry;
50922+
50923+ /* insert us into the table searchable by inode/dev */
50924+ insert_inodev_entry(ientry);
50925+
50926+ return 1;
50927+}
50928+
50929+static void
50930+insert_acl_obj_label(struct acl_object_label *obj,
50931+ struct acl_subject_label *subj)
50932+{
50933+ unsigned int index =
50934+ fhash(obj->inode, obj->device, subj->obj_hash_size);
50935+ struct acl_object_label **curr;
50936+
50937+
50938+ obj->prev = NULL;
50939+
50940+ curr = &subj->obj_hash[index];
50941+ if (*curr != NULL)
50942+ (*curr)->prev = obj;
50943+
50944+ obj->next = *curr;
50945+ *curr = obj;
50946+
50947+ return;
50948+}
50949+
50950+static void
50951+insert_acl_subj_label(struct acl_subject_label *obj,
50952+ struct acl_role_label *role)
50953+{
50954+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
50955+ struct acl_subject_label **curr;
50956+
50957+ obj->prev = NULL;
50958+
50959+ curr = &role->subj_hash[index];
50960+ if (*curr != NULL)
50961+ (*curr)->prev = obj;
50962+
50963+ obj->next = *curr;
50964+ *curr = obj;
50965+
50966+ return;
50967+}
50968+
50969+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
50970+
50971+static void *
50972+create_table(__u32 * len, int elementsize)
50973+{
50974+ unsigned int table_sizes[] = {
50975+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
50976+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
50977+ 4194301, 8388593, 16777213, 33554393, 67108859
50978+ };
50979+ void *newtable = NULL;
50980+ unsigned int pwr = 0;
50981+
50982+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
50983+ table_sizes[pwr] <= *len)
50984+ pwr++;
50985+
50986+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
50987+ return newtable;
50988+
50989+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
50990+ newtable =
50991+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
50992+ else
50993+ newtable = vmalloc(table_sizes[pwr] * elementsize);
50994+
50995+ *len = table_sizes[pwr];
50996+
50997+ return newtable;
50998+}
50999+
51000+static int
51001+init_variables(const struct gr_arg *arg)
51002+{
51003+ struct task_struct *reaper = init_pid_ns.child_reaper;
51004+ unsigned int stacksize;
51005+
51006+ subj_map_set.s_size = arg->role_db.num_subjects;
51007+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
51008+ name_set.n_size = arg->role_db.num_objects;
51009+ inodev_set.i_size = arg->role_db.num_objects;
51010+
51011+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
51012+ !name_set.n_size || !inodev_set.i_size)
51013+ return 1;
51014+
51015+ if (!gr_init_uidset())
51016+ return 1;
51017+
51018+ /* set up the stack that holds allocation info */
51019+
51020+ stacksize = arg->role_db.num_pointers + 5;
51021+
51022+ if (!acl_alloc_stack_init(stacksize))
51023+ return 1;
51024+
51025+ /* grab reference for the real root dentry and vfsmount */
51026+ get_fs_root(reaper->fs, &real_root);
51027+
51028+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51029+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
51030+#endif
51031+
51032+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
51033+ if (fakefs_obj_rw == NULL)
51034+ return 1;
51035+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
51036+
51037+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
51038+ if (fakefs_obj_rwx == NULL)
51039+ return 1;
51040+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
51041+
51042+ subj_map_set.s_hash =
51043+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
51044+ acl_role_set.r_hash =
51045+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
51046+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
51047+ inodev_set.i_hash =
51048+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
51049+
51050+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
51051+ !name_set.n_hash || !inodev_set.i_hash)
51052+ return 1;
51053+
51054+ memset(subj_map_set.s_hash, 0,
51055+ sizeof(struct subject_map *) * subj_map_set.s_size);
51056+ memset(acl_role_set.r_hash, 0,
51057+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
51058+ memset(name_set.n_hash, 0,
51059+ sizeof (struct name_entry *) * name_set.n_size);
51060+ memset(inodev_set.i_hash, 0,
51061+ sizeof (struct inodev_entry *) * inodev_set.i_size);
51062+
51063+ return 0;
51064+}
51065+
51066+/* free information not needed after startup
51067+ currently contains user->kernel pointer mappings for subjects
51068+*/
51069+
51070+static void
51071+free_init_variables(void)
51072+{
51073+ __u32 i;
51074+
51075+ if (subj_map_set.s_hash) {
51076+ for (i = 0; i < subj_map_set.s_size; i++) {
51077+ if (subj_map_set.s_hash[i]) {
51078+ kfree(subj_map_set.s_hash[i]);
51079+ subj_map_set.s_hash[i] = NULL;
51080+ }
51081+ }
51082+
51083+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
51084+ PAGE_SIZE)
51085+ kfree(subj_map_set.s_hash);
51086+ else
51087+ vfree(subj_map_set.s_hash);
51088+ }
51089+
51090+ return;
51091+}
51092+
51093+static void
51094+free_variables(void)
51095+{
51096+ struct acl_subject_label *s;
51097+ struct acl_role_label *r;
51098+ struct task_struct *task, *task2;
51099+ unsigned int x;
51100+
51101+ gr_clear_learn_entries();
51102+
51103+ read_lock(&tasklist_lock);
51104+ do_each_thread(task2, task) {
51105+ task->acl_sp_role = 0;
51106+ task->acl_role_id = 0;
51107+ task->acl = NULL;
51108+ task->role = NULL;
51109+ } while_each_thread(task2, task);
51110+ read_unlock(&tasklist_lock);
51111+
51112+ /* release the reference to the real root dentry and vfsmount */
51113+ path_put(&real_root);
51114+ memset(&real_root, 0, sizeof(real_root));
51115+
51116+ /* free all object hash tables */
51117+
51118+ FOR_EACH_ROLE_START(r)
51119+ if (r->subj_hash == NULL)
51120+ goto next_role;
51121+ FOR_EACH_SUBJECT_START(r, s, x)
51122+ if (s->obj_hash == NULL)
51123+ break;
51124+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51125+ kfree(s->obj_hash);
51126+ else
51127+ vfree(s->obj_hash);
51128+ FOR_EACH_SUBJECT_END(s, x)
51129+ FOR_EACH_NESTED_SUBJECT_START(r, s)
51130+ if (s->obj_hash == NULL)
51131+ break;
51132+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51133+ kfree(s->obj_hash);
51134+ else
51135+ vfree(s->obj_hash);
51136+ FOR_EACH_NESTED_SUBJECT_END(s)
51137+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
51138+ kfree(r->subj_hash);
51139+ else
51140+ vfree(r->subj_hash);
51141+ r->subj_hash = NULL;
51142+next_role:
51143+ FOR_EACH_ROLE_END(r)
51144+
51145+ acl_free_all();
51146+
51147+ if (acl_role_set.r_hash) {
51148+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
51149+ PAGE_SIZE)
51150+ kfree(acl_role_set.r_hash);
51151+ else
51152+ vfree(acl_role_set.r_hash);
51153+ }
51154+ if (name_set.n_hash) {
51155+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
51156+ PAGE_SIZE)
51157+ kfree(name_set.n_hash);
51158+ else
51159+ vfree(name_set.n_hash);
51160+ }
51161+
51162+ if (inodev_set.i_hash) {
51163+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
51164+ PAGE_SIZE)
51165+ kfree(inodev_set.i_hash);
51166+ else
51167+ vfree(inodev_set.i_hash);
51168+ }
51169+
51170+ gr_free_uidset();
51171+
51172+ memset(&name_set, 0, sizeof (struct name_db));
51173+ memset(&inodev_set, 0, sizeof (struct inodev_db));
51174+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
51175+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
51176+
51177+ default_role = NULL;
51178+ kernel_role = NULL;
51179+ role_list = NULL;
51180+
51181+ return;
51182+}
51183+
51184+static __u32
51185+count_user_objs(struct acl_object_label *userp)
51186+{
51187+ struct acl_object_label o_tmp;
51188+ __u32 num = 0;
51189+
51190+ while (userp) {
51191+ if (copy_from_user(&o_tmp, userp,
51192+ sizeof (struct acl_object_label)))
51193+ break;
51194+
51195+ userp = o_tmp.prev;
51196+ num++;
51197+ }
51198+
51199+ return num;
51200+}
51201+
51202+static struct acl_subject_label *
51203+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
51204+
51205+static int
51206+copy_user_glob(struct acl_object_label *obj)
51207+{
51208+ struct acl_object_label *g_tmp, **guser;
51209+ unsigned int len;
51210+ char *tmp;
51211+
51212+ if (obj->globbed == NULL)
51213+ return 0;
51214+
51215+ guser = &obj->globbed;
51216+ while (*guser) {
51217+ g_tmp = (struct acl_object_label *)
51218+ acl_alloc(sizeof (struct acl_object_label));
51219+ if (g_tmp == NULL)
51220+ return -ENOMEM;
51221+
51222+ if (copy_from_user(g_tmp, *guser,
51223+ sizeof (struct acl_object_label)))
51224+ return -EFAULT;
51225+
51226+ len = strnlen_user(g_tmp->filename, PATH_MAX);
51227+
51228+ if (!len || len >= PATH_MAX)
51229+ return -EINVAL;
51230+
51231+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51232+ return -ENOMEM;
51233+
51234+ if (copy_from_user(tmp, g_tmp->filename, len))
51235+ return -EFAULT;
51236+ tmp[len-1] = '\0';
51237+ g_tmp->filename = tmp;
51238+
51239+ *guser = g_tmp;
51240+ guser = &(g_tmp->next);
51241+ }
51242+
51243+ return 0;
51244+}
51245+
51246+static int
51247+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
51248+ struct acl_role_label *role)
51249+{
51250+ struct acl_object_label *o_tmp;
51251+ unsigned int len;
51252+ int ret;
51253+ char *tmp;
51254+
51255+ while (userp) {
51256+ if ((o_tmp = (struct acl_object_label *)
51257+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
51258+ return -ENOMEM;
51259+
51260+ if (copy_from_user(o_tmp, userp,
51261+ sizeof (struct acl_object_label)))
51262+ return -EFAULT;
51263+
51264+ userp = o_tmp->prev;
51265+
51266+ len = strnlen_user(o_tmp->filename, PATH_MAX);
51267+
51268+ if (!len || len >= PATH_MAX)
51269+ return -EINVAL;
51270+
51271+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51272+ return -ENOMEM;
51273+
51274+ if (copy_from_user(tmp, o_tmp->filename, len))
51275+ return -EFAULT;
51276+ tmp[len-1] = '\0';
51277+ o_tmp->filename = tmp;
51278+
51279+ insert_acl_obj_label(o_tmp, subj);
51280+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
51281+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
51282+ return -ENOMEM;
51283+
51284+ ret = copy_user_glob(o_tmp);
51285+ if (ret)
51286+ return ret;
51287+
51288+ if (o_tmp->nested) {
51289+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
51290+ if (IS_ERR(o_tmp->nested))
51291+ return PTR_ERR(o_tmp->nested);
51292+
51293+ /* insert into nested subject list */
51294+ o_tmp->nested->next = role->hash->first;
51295+ role->hash->first = o_tmp->nested;
51296+ }
51297+ }
51298+
51299+ return 0;
51300+}
51301+
51302+static __u32
51303+count_user_subjs(struct acl_subject_label *userp)
51304+{
51305+ struct acl_subject_label s_tmp;
51306+ __u32 num = 0;
51307+
51308+ while (userp) {
51309+ if (copy_from_user(&s_tmp, userp,
51310+ sizeof (struct acl_subject_label)))
51311+ break;
51312+
51313+ userp = s_tmp.prev;
51314+ /* do not count nested subjects against this count, since
51315+ they are not included in the hash table, but are
51316+ attached to objects. We have already counted
51317+ the subjects in userspace for the allocation
51318+ stack
51319+ */
51320+ if (!(s_tmp.mode & GR_NESTED))
51321+ num++;
51322+ }
51323+
51324+ return num;
51325+}
51326+
51327+static int
51328+copy_user_allowedips(struct acl_role_label *rolep)
51329+{
51330+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
51331+
51332+ ruserip = rolep->allowed_ips;
51333+
51334+ while (ruserip) {
51335+ rlast = rtmp;
51336+
51337+ if ((rtmp = (struct role_allowed_ip *)
51338+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
51339+ return -ENOMEM;
51340+
51341+ if (copy_from_user(rtmp, ruserip,
51342+ sizeof (struct role_allowed_ip)))
51343+ return -EFAULT;
51344+
51345+ ruserip = rtmp->prev;
51346+
51347+ if (!rlast) {
51348+ rtmp->prev = NULL;
51349+ rolep->allowed_ips = rtmp;
51350+ } else {
51351+ rlast->next = rtmp;
51352+ rtmp->prev = rlast;
51353+ }
51354+
51355+ if (!ruserip)
51356+ rtmp->next = NULL;
51357+ }
51358+
51359+ return 0;
51360+}
51361+
51362+static int
51363+copy_user_transitions(struct acl_role_label *rolep)
51364+{
51365+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
51366+
51367+ unsigned int len;
51368+ char *tmp;
51369+
51370+ rusertp = rolep->transitions;
51371+
51372+ while (rusertp) {
51373+ rlast = rtmp;
51374+
51375+ if ((rtmp = (struct role_transition *)
51376+ acl_alloc(sizeof (struct role_transition))) == NULL)
51377+ return -ENOMEM;
51378+
51379+ if (copy_from_user(rtmp, rusertp,
51380+ sizeof (struct role_transition)))
51381+ return -EFAULT;
51382+
51383+ rusertp = rtmp->prev;
51384+
51385+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
51386+
51387+ if (!len || len >= GR_SPROLE_LEN)
51388+ return -EINVAL;
51389+
51390+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51391+ return -ENOMEM;
51392+
51393+ if (copy_from_user(tmp, rtmp->rolename, len))
51394+ return -EFAULT;
51395+ tmp[len-1] = '\0';
51396+ rtmp->rolename = tmp;
51397+
51398+ if (!rlast) {
51399+ rtmp->prev = NULL;
51400+ rolep->transitions = rtmp;
51401+ } else {
51402+ rlast->next = rtmp;
51403+ rtmp->prev = rlast;
51404+ }
51405+
51406+ if (!rusertp)
51407+ rtmp->next = NULL;
51408+ }
51409+
51410+ return 0;
51411+}
51412+
51413+static struct acl_subject_label *
51414+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
51415+{
51416+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
51417+ unsigned int len;
51418+ char *tmp;
51419+ __u32 num_objs;
51420+ struct acl_ip_label **i_tmp, *i_utmp2;
51421+ struct gr_hash_struct ghash;
51422+ struct subject_map *subjmap;
51423+ unsigned int i_num;
51424+ int err;
51425+
51426+ s_tmp = lookup_subject_map(userp);
51427+
51428+ /* we've already copied this subject into the kernel, just return
51429+ the reference to it, and don't copy it over again
51430+ */
51431+ if (s_tmp)
51432+ return(s_tmp);
51433+
51434+ if ((s_tmp = (struct acl_subject_label *)
51435+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
51436+ return ERR_PTR(-ENOMEM);
51437+
51438+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
51439+ if (subjmap == NULL)
51440+ return ERR_PTR(-ENOMEM);
51441+
51442+ subjmap->user = userp;
51443+ subjmap->kernel = s_tmp;
51444+ insert_subj_map_entry(subjmap);
51445+
51446+ if (copy_from_user(s_tmp, userp,
51447+ sizeof (struct acl_subject_label)))
51448+ return ERR_PTR(-EFAULT);
51449+
51450+ len = strnlen_user(s_tmp->filename, PATH_MAX);
51451+
51452+ if (!len || len >= PATH_MAX)
51453+ return ERR_PTR(-EINVAL);
51454+
51455+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51456+ return ERR_PTR(-ENOMEM);
51457+
51458+ if (copy_from_user(tmp, s_tmp->filename, len))
51459+ return ERR_PTR(-EFAULT);
51460+ tmp[len-1] = '\0';
51461+ s_tmp->filename = tmp;
51462+
51463+ if (!strcmp(s_tmp->filename, "/"))
51464+ role->root_label = s_tmp;
51465+
51466+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
51467+ return ERR_PTR(-EFAULT);
51468+
51469+ /* copy user and group transition tables */
51470+
51471+ if (s_tmp->user_trans_num) {
51472+ uid_t *uidlist;
51473+
51474+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
51475+ if (uidlist == NULL)
51476+ return ERR_PTR(-ENOMEM);
51477+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
51478+ return ERR_PTR(-EFAULT);
51479+
51480+ s_tmp->user_transitions = uidlist;
51481+ }
51482+
51483+ if (s_tmp->group_trans_num) {
51484+ gid_t *gidlist;
51485+
51486+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
51487+ if (gidlist == NULL)
51488+ return ERR_PTR(-ENOMEM);
51489+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
51490+ return ERR_PTR(-EFAULT);
51491+
51492+ s_tmp->group_transitions = gidlist;
51493+ }
51494+
51495+ /* set up object hash table */
51496+ num_objs = count_user_objs(ghash.first);
51497+
51498+ s_tmp->obj_hash_size = num_objs;
51499+ s_tmp->obj_hash =
51500+ (struct acl_object_label **)
51501+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
51502+
51503+ if (!s_tmp->obj_hash)
51504+ return ERR_PTR(-ENOMEM);
51505+
51506+ memset(s_tmp->obj_hash, 0,
51507+ s_tmp->obj_hash_size *
51508+ sizeof (struct acl_object_label *));
51509+
51510+ /* add in objects */
51511+ err = copy_user_objs(ghash.first, s_tmp, role);
51512+
51513+ if (err)
51514+ return ERR_PTR(err);
51515+
51516+ /* set pointer for parent subject */
51517+ if (s_tmp->parent_subject) {
51518+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
51519+
51520+ if (IS_ERR(s_tmp2))
51521+ return s_tmp2;
51522+
51523+ s_tmp->parent_subject = s_tmp2;
51524+ }
51525+
51526+ /* add in ip acls */
51527+
51528+ if (!s_tmp->ip_num) {
51529+ s_tmp->ips = NULL;
51530+ goto insert;
51531+ }
51532+
51533+ i_tmp =
51534+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
51535+ sizeof (struct acl_ip_label *));
51536+
51537+ if (!i_tmp)
51538+ return ERR_PTR(-ENOMEM);
51539+
51540+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
51541+ *(i_tmp + i_num) =
51542+ (struct acl_ip_label *)
51543+ acl_alloc(sizeof (struct acl_ip_label));
51544+ if (!*(i_tmp + i_num))
51545+ return ERR_PTR(-ENOMEM);
51546+
51547+ if (copy_from_user
51548+ (&i_utmp2, s_tmp->ips + i_num,
51549+ sizeof (struct acl_ip_label *)))
51550+ return ERR_PTR(-EFAULT);
51551+
51552+ if (copy_from_user
51553+ (*(i_tmp + i_num), i_utmp2,
51554+ sizeof (struct acl_ip_label)))
51555+ return ERR_PTR(-EFAULT);
51556+
51557+ if ((*(i_tmp + i_num))->iface == NULL)
51558+ continue;
51559+
51560+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
51561+ if (!len || len >= IFNAMSIZ)
51562+ return ERR_PTR(-EINVAL);
51563+ tmp = acl_alloc(len);
51564+ if (tmp == NULL)
51565+ return ERR_PTR(-ENOMEM);
51566+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
51567+ return ERR_PTR(-EFAULT);
51568+ (*(i_tmp + i_num))->iface = tmp;
51569+ }
51570+
51571+ s_tmp->ips = i_tmp;
51572+
51573+insert:
51574+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
51575+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
51576+ return ERR_PTR(-ENOMEM);
51577+
51578+ return s_tmp;
51579+}
51580+
51581+static int
51582+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
51583+{
51584+ struct acl_subject_label s_pre;
51585+ struct acl_subject_label * ret;
51586+ int err;
51587+
51588+ while (userp) {
51589+ if (copy_from_user(&s_pre, userp,
51590+ sizeof (struct acl_subject_label)))
51591+ return -EFAULT;
51592+
51593+ /* do not add nested subjects here, add
51594+ while parsing objects
51595+ */
51596+
51597+ if (s_pre.mode & GR_NESTED) {
51598+ userp = s_pre.prev;
51599+ continue;
51600+ }
51601+
51602+ ret = do_copy_user_subj(userp, role);
51603+
51604+ err = PTR_ERR(ret);
51605+ if (IS_ERR(ret))
51606+ return err;
51607+
51608+ insert_acl_subj_label(ret, role);
51609+
51610+ userp = s_pre.prev;
51611+ }
51612+
51613+ return 0;
51614+}
51615+
51616+static int
51617+copy_user_acl(struct gr_arg *arg)
51618+{
51619+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
51620+ struct sprole_pw *sptmp;
51621+ struct gr_hash_struct *ghash;
51622+ uid_t *domainlist;
51623+ unsigned int r_num;
51624+ unsigned int len;
51625+ char *tmp;
51626+ int err = 0;
51627+ __u16 i;
51628+ __u32 num_subjs;
51629+
51630+ /* we need a default and kernel role */
51631+ if (arg->role_db.num_roles < 2)
51632+ return -EINVAL;
51633+
51634+ /* copy special role authentication info from userspace */
51635+
51636+ num_sprole_pws = arg->num_sprole_pws;
51637+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
51638+
51639+ if (!acl_special_roles && num_sprole_pws)
51640+ return -ENOMEM;
51641+
51642+ for (i = 0; i < num_sprole_pws; i++) {
51643+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
51644+ if (!sptmp)
51645+ return -ENOMEM;
51646+ if (copy_from_user(sptmp, arg->sprole_pws + i,
51647+ sizeof (struct sprole_pw)))
51648+ return -EFAULT;
51649+
51650+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
51651+
51652+ if (!len || len >= GR_SPROLE_LEN)
51653+ return -EINVAL;
51654+
51655+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51656+ return -ENOMEM;
51657+
51658+ if (copy_from_user(tmp, sptmp->rolename, len))
51659+ return -EFAULT;
51660+
51661+ tmp[len-1] = '\0';
51662+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51663+ printk(KERN_ALERT "Copying special role %s\n", tmp);
51664+#endif
51665+ sptmp->rolename = tmp;
51666+ acl_special_roles[i] = sptmp;
51667+ }
51668+
51669+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
51670+
51671+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
51672+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
51673+
51674+ if (!r_tmp)
51675+ return -ENOMEM;
51676+
51677+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
51678+ sizeof (struct acl_role_label *)))
51679+ return -EFAULT;
51680+
51681+ if (copy_from_user(r_tmp, r_utmp2,
51682+ sizeof (struct acl_role_label)))
51683+ return -EFAULT;
51684+
51685+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
51686+
51687+ if (!len || len >= PATH_MAX)
51688+ return -EINVAL;
51689+
51690+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51691+ return -ENOMEM;
51692+
51693+ if (copy_from_user(tmp, r_tmp->rolename, len))
51694+ return -EFAULT;
51695+
51696+ tmp[len-1] = '\0';
51697+ r_tmp->rolename = tmp;
51698+
51699+ if (!strcmp(r_tmp->rolename, "default")
51700+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
51701+ default_role = r_tmp;
51702+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
51703+ kernel_role = r_tmp;
51704+ }
51705+
51706+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
51707+ return -ENOMEM;
51708+
51709+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
51710+ return -EFAULT;
51711+
51712+ r_tmp->hash = ghash;
51713+
51714+ num_subjs = count_user_subjs(r_tmp->hash->first);
51715+
51716+ r_tmp->subj_hash_size = num_subjs;
51717+ r_tmp->subj_hash =
51718+ (struct acl_subject_label **)
51719+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
51720+
51721+ if (!r_tmp->subj_hash)
51722+ return -ENOMEM;
51723+
51724+ err = copy_user_allowedips(r_tmp);
51725+ if (err)
51726+ return err;
51727+
51728+ /* copy domain info */
51729+ if (r_tmp->domain_children != NULL) {
51730+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
51731+ if (domainlist == NULL)
51732+ return -ENOMEM;
51733+
51734+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
51735+ return -EFAULT;
51736+
51737+ r_tmp->domain_children = domainlist;
51738+ }
51739+
51740+ err = copy_user_transitions(r_tmp);
51741+ if (err)
51742+ return err;
51743+
51744+ memset(r_tmp->subj_hash, 0,
51745+ r_tmp->subj_hash_size *
51746+ sizeof (struct acl_subject_label *));
51747+
51748+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
51749+
51750+ if (err)
51751+ return err;
51752+
51753+ /* set nested subject list to null */
51754+ r_tmp->hash->first = NULL;
51755+
51756+ insert_acl_role_label(r_tmp);
51757+ }
51758+
51759+ if (default_role == NULL || kernel_role == NULL)
51760+ return -EINVAL;
51761+
51762+ return err;
51763+}
51764+
51765+static int
51766+gracl_init(struct gr_arg *args)
51767+{
51768+ int error = 0;
51769+
51770+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
51771+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
51772+
51773+ if (init_variables(args)) {
51774+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
51775+ error = -ENOMEM;
51776+ free_variables();
51777+ goto out;
51778+ }
51779+
51780+ error = copy_user_acl(args);
51781+ free_init_variables();
51782+ if (error) {
51783+ free_variables();
51784+ goto out;
51785+ }
51786+
51787+ if ((error = gr_set_acls(0))) {
51788+ free_variables();
51789+ goto out;
51790+ }
51791+
51792+ pax_open_kernel();
51793+ gr_status |= GR_READY;
51794+ pax_close_kernel();
51795+
51796+ out:
51797+ return error;
51798+}
51799+
51800+/* derived from glibc fnmatch() 0: match, 1: no match*/
51801+
51802+static int
51803+glob_match(const char *p, const char *n)
51804+{
51805+ char c;
51806+
51807+ while ((c = *p++) != '\0') {
51808+ switch (c) {
51809+ case '?':
51810+ if (*n == '\0')
51811+ return 1;
51812+ else if (*n == '/')
51813+ return 1;
51814+ break;
51815+ case '\\':
51816+ if (*n != c)
51817+ return 1;
51818+ break;
51819+ case '*':
51820+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
51821+ if (*n == '/')
51822+ return 1;
51823+ else if (c == '?') {
51824+ if (*n == '\0')
51825+ return 1;
51826+ else
51827+ ++n;
51828+ }
51829+ }
51830+ if (c == '\0') {
51831+ return 0;
51832+ } else {
51833+ const char *endp;
51834+
51835+ if ((endp = strchr(n, '/')) == NULL)
51836+ endp = n + strlen(n);
51837+
51838+ if (c == '[') {
51839+ for (--p; n < endp; ++n)
51840+ if (!glob_match(p, n))
51841+ return 0;
51842+ } else if (c == '/') {
51843+ while (*n != '\0' && *n != '/')
51844+ ++n;
51845+ if (*n == '/' && !glob_match(p, n + 1))
51846+ return 0;
51847+ } else {
51848+ for (--p; n < endp; ++n)
51849+ if (*n == c && !glob_match(p, n))
51850+ return 0;
51851+ }
51852+
51853+ return 1;
51854+ }
51855+ case '[':
51856+ {
51857+ int not;
51858+ char cold;
51859+
51860+ if (*n == '\0' || *n == '/')
51861+ return 1;
51862+
51863+ not = (*p == '!' || *p == '^');
51864+ if (not)
51865+ ++p;
51866+
51867+ c = *p++;
51868+ for (;;) {
51869+ unsigned char fn = (unsigned char)*n;
51870+
51871+ if (c == '\0')
51872+ return 1;
51873+ else {
51874+ if (c == fn)
51875+ goto matched;
51876+ cold = c;
51877+ c = *p++;
51878+
51879+ if (c == '-' && *p != ']') {
51880+ unsigned char cend = *p++;
51881+
51882+ if (cend == '\0')
51883+ return 1;
51884+
51885+ if (cold <= fn && fn <= cend)
51886+ goto matched;
51887+
51888+ c = *p++;
51889+ }
51890+ }
51891+
51892+ if (c == ']')
51893+ break;
51894+ }
51895+ if (!not)
51896+ return 1;
51897+ break;
51898+ matched:
51899+ while (c != ']') {
51900+ if (c == '\0')
51901+ return 1;
51902+
51903+ c = *p++;
51904+ }
51905+ if (not)
51906+ return 1;
51907+ }
51908+ break;
51909+ default:
51910+ if (c != *n)
51911+ return 1;
51912+ }
51913+
51914+ ++n;
51915+ }
51916+
51917+ if (*n == '\0')
51918+ return 0;
51919+
51920+ if (*n == '/')
51921+ return 0;
51922+
51923+ return 1;
51924+}
51925+
51926+static struct acl_object_label *
51927+chk_glob_label(struct acl_object_label *globbed,
51928+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
51929+{
51930+ struct acl_object_label *tmp;
51931+
51932+ if (*path == NULL)
51933+ *path = gr_to_filename_nolock(dentry, mnt);
51934+
51935+ tmp = globbed;
51936+
51937+ while (tmp) {
51938+ if (!glob_match(tmp->filename, *path))
51939+ return tmp;
51940+ tmp = tmp->next;
51941+ }
51942+
51943+ return NULL;
51944+}
51945+
51946+static struct acl_object_label *
51947+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
51948+ const ino_t curr_ino, const dev_t curr_dev,
51949+ const struct acl_subject_label *subj, char **path, const int checkglob)
51950+{
51951+ struct acl_subject_label *tmpsubj;
51952+ struct acl_object_label *retval;
51953+ struct acl_object_label *retval2;
51954+
51955+ tmpsubj = (struct acl_subject_label *) subj;
51956+ read_lock(&gr_inode_lock);
51957+ do {
51958+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
51959+ if (retval) {
51960+ if (checkglob && retval->globbed) {
51961+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
51962+ if (retval2)
51963+ retval = retval2;
51964+ }
51965+ break;
51966+ }
51967+ } while ((tmpsubj = tmpsubj->parent_subject));
51968+ read_unlock(&gr_inode_lock);
51969+
51970+ return retval;
51971+}
51972+
51973+static __inline__ struct acl_object_label *
51974+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
51975+ struct dentry *curr_dentry,
51976+ const struct acl_subject_label *subj, char **path, const int checkglob)
51977+{
51978+ int newglob = checkglob;
51979+ ino_t inode;
51980+ dev_t device;
51981+
51982+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
51983+ as we don't want a / * rule to match instead of the / object
51984+ don't do this for create lookups that call this function though, since they're looking up
51985+ on the parent and thus need globbing checks on all paths
51986+ */
51987+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
51988+ newglob = GR_NO_GLOB;
51989+
51990+ spin_lock(&curr_dentry->d_lock);
51991+ inode = curr_dentry->d_inode->i_ino;
51992+ device = __get_dev(curr_dentry);
51993+ spin_unlock(&curr_dentry->d_lock);
51994+
51995+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
51996+}
51997+
51998+static struct acl_object_label *
51999+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52000+ const struct acl_subject_label *subj, char *path, const int checkglob)
52001+{
52002+ struct dentry *dentry = (struct dentry *) l_dentry;
52003+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52004+ struct mount *real_mnt = real_mount(mnt);
52005+ struct acl_object_label *retval;
52006+ struct dentry *parent;
52007+
52008+ write_seqlock(&rename_lock);
52009+ br_read_lock(vfsmount_lock);
52010+
52011+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
52012+#ifdef CONFIG_NET
52013+ mnt == sock_mnt ||
52014+#endif
52015+#ifdef CONFIG_HUGETLBFS
52016+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
52017+#endif
52018+ /* ignore Eric Biederman */
52019+ IS_PRIVATE(l_dentry->d_inode))) {
52020+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
52021+ goto out;
52022+ }
52023+
52024+ for (;;) {
52025+ if (dentry == real_root.dentry && mnt == real_root.mnt)
52026+ break;
52027+
52028+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52029+ if (!mnt_has_parent(real_mnt))
52030+ break;
52031+
52032+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52033+ if (retval != NULL)
52034+ goto out;
52035+
52036+ dentry = real_mnt->mnt_mountpoint;
52037+ real_mnt = real_mnt->mnt_parent;
52038+ mnt = &real_mnt->mnt;
52039+ continue;
52040+ }
52041+
52042+ parent = dentry->d_parent;
52043+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52044+ if (retval != NULL)
52045+ goto out;
52046+
52047+ dentry = parent;
52048+ }
52049+
52050+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52051+
52052+ /* real_root is pinned so we don't have to hold a reference */
52053+ if (retval == NULL)
52054+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
52055+out:
52056+ br_read_unlock(vfsmount_lock);
52057+ write_sequnlock(&rename_lock);
52058+
52059+ BUG_ON(retval == NULL);
52060+
52061+ return retval;
52062+}
52063+
52064+static __inline__ struct acl_object_label *
52065+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52066+ const struct acl_subject_label *subj)
52067+{
52068+ char *path = NULL;
52069+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
52070+}
52071+
52072+static __inline__ struct acl_object_label *
52073+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52074+ const struct acl_subject_label *subj)
52075+{
52076+ char *path = NULL;
52077+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
52078+}
52079+
52080+static __inline__ struct acl_object_label *
52081+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52082+ const struct acl_subject_label *subj, char *path)
52083+{
52084+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
52085+}
52086+
52087+static struct acl_subject_label *
52088+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52089+ const struct acl_role_label *role)
52090+{
52091+ struct dentry *dentry = (struct dentry *) l_dentry;
52092+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52093+ struct mount *real_mnt = real_mount(mnt);
52094+ struct acl_subject_label *retval;
52095+ struct dentry *parent;
52096+
52097+ write_seqlock(&rename_lock);
52098+ br_read_lock(vfsmount_lock);
52099+
52100+ for (;;) {
52101+ if (dentry == real_root.dentry && mnt == real_root.mnt)
52102+ break;
52103+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52104+ if (!mnt_has_parent(real_mnt))
52105+ break;
52106+
52107+ spin_lock(&dentry->d_lock);
52108+ read_lock(&gr_inode_lock);
52109+ retval =
52110+ lookup_acl_subj_label(dentry->d_inode->i_ino,
52111+ __get_dev(dentry), role);
52112+ read_unlock(&gr_inode_lock);
52113+ spin_unlock(&dentry->d_lock);
52114+ if (retval != NULL)
52115+ goto out;
52116+
52117+ dentry = real_mnt->mnt_mountpoint;
52118+ real_mnt = real_mnt->mnt_parent;
52119+ mnt = &real_mnt->mnt;
52120+ continue;
52121+ }
52122+
52123+ spin_lock(&dentry->d_lock);
52124+ read_lock(&gr_inode_lock);
52125+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52126+ __get_dev(dentry), role);
52127+ read_unlock(&gr_inode_lock);
52128+ parent = dentry->d_parent;
52129+ spin_unlock(&dentry->d_lock);
52130+
52131+ if (retval != NULL)
52132+ goto out;
52133+
52134+ dentry = parent;
52135+ }
52136+
52137+ spin_lock(&dentry->d_lock);
52138+ read_lock(&gr_inode_lock);
52139+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52140+ __get_dev(dentry), role);
52141+ read_unlock(&gr_inode_lock);
52142+ spin_unlock(&dentry->d_lock);
52143+
52144+ if (unlikely(retval == NULL)) {
52145+ /* real_root is pinned, we don't need to hold a reference */
52146+ read_lock(&gr_inode_lock);
52147+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
52148+ __get_dev(real_root.dentry), role);
52149+ read_unlock(&gr_inode_lock);
52150+ }
52151+out:
52152+ br_read_unlock(vfsmount_lock);
52153+ write_sequnlock(&rename_lock);
52154+
52155+ BUG_ON(retval == NULL);
52156+
52157+ return retval;
52158+}
52159+
52160+static void
52161+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
52162+{
52163+ struct task_struct *task = current;
52164+ const struct cred *cred = current_cred();
52165+
52166+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52167+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52168+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52169+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
52170+
52171+ return;
52172+}
52173+
52174+static void
52175+gr_log_learn_id_change(const char type, const unsigned int real,
52176+ const unsigned int effective, const unsigned int fs)
52177+{
52178+ struct task_struct *task = current;
52179+ const struct cred *cred = current_cred();
52180+
52181+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
52182+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52183+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52184+ type, real, effective, fs, &task->signal->saved_ip);
52185+
52186+ return;
52187+}
52188+
52189+__u32
52190+gr_search_file(const struct dentry * dentry, const __u32 mode,
52191+ const struct vfsmount * mnt)
52192+{
52193+ __u32 retval = mode;
52194+ struct acl_subject_label *curracl;
52195+ struct acl_object_label *currobj;
52196+
52197+ if (unlikely(!(gr_status & GR_READY)))
52198+ return (mode & ~GR_AUDITS);
52199+
52200+ curracl = current->acl;
52201+
52202+ currobj = chk_obj_label(dentry, mnt, curracl);
52203+ retval = currobj->mode & mode;
52204+
52205+ /* if we're opening a specified transfer file for writing
52206+ (e.g. /dev/initctl), then transfer our role to init
52207+ */
52208+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
52209+ current->role->roletype & GR_ROLE_PERSIST)) {
52210+ struct task_struct *task = init_pid_ns.child_reaper;
52211+
52212+ if (task->role != current->role) {
52213+ task->acl_sp_role = 0;
52214+ task->acl_role_id = current->acl_role_id;
52215+ task->role = current->role;
52216+ rcu_read_lock();
52217+ read_lock(&grsec_exec_file_lock);
52218+ gr_apply_subject_to_task(task);
52219+ read_unlock(&grsec_exec_file_lock);
52220+ rcu_read_unlock();
52221+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
52222+ }
52223+ }
52224+
52225+ if (unlikely
52226+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
52227+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
52228+ __u32 new_mode = mode;
52229+
52230+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52231+
52232+ retval = new_mode;
52233+
52234+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
52235+ new_mode |= GR_INHERIT;
52236+
52237+ if (!(mode & GR_NOLEARN))
52238+ gr_log_learn(dentry, mnt, new_mode);
52239+ }
52240+
52241+ return retval;
52242+}
52243+
52244+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
52245+ const struct dentry *parent,
52246+ const struct vfsmount *mnt)
52247+{
52248+ struct name_entry *match;
52249+ struct acl_object_label *matchpo;
52250+ struct acl_subject_label *curracl;
52251+ char *path;
52252+
52253+ if (unlikely(!(gr_status & GR_READY)))
52254+ return NULL;
52255+
52256+ preempt_disable();
52257+ path = gr_to_filename_rbac(new_dentry, mnt);
52258+ match = lookup_name_entry_create(path);
52259+
52260+ curracl = current->acl;
52261+
52262+ if (match) {
52263+ read_lock(&gr_inode_lock);
52264+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
52265+ read_unlock(&gr_inode_lock);
52266+
52267+ if (matchpo) {
52268+ preempt_enable();
52269+ return matchpo;
52270+ }
52271+ }
52272+
52273+ // lookup parent
52274+
52275+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
52276+
52277+ preempt_enable();
52278+ return matchpo;
52279+}
52280+
52281+__u32
52282+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
52283+ const struct vfsmount * mnt, const __u32 mode)
52284+{
52285+ struct acl_object_label *matchpo;
52286+ __u32 retval;
52287+
52288+ if (unlikely(!(gr_status & GR_READY)))
52289+ return (mode & ~GR_AUDITS);
52290+
52291+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
52292+
52293+ retval = matchpo->mode & mode;
52294+
52295+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
52296+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52297+ __u32 new_mode = mode;
52298+
52299+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52300+
52301+ gr_log_learn(new_dentry, mnt, new_mode);
52302+ return new_mode;
52303+ }
52304+
52305+ return retval;
52306+}
52307+
52308+__u32
52309+gr_check_link(const struct dentry * new_dentry,
52310+ const struct dentry * parent_dentry,
52311+ const struct vfsmount * parent_mnt,
52312+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
52313+{
52314+ struct acl_object_label *obj;
52315+ __u32 oldmode, newmode;
52316+ __u32 needmode;
52317+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
52318+ GR_DELETE | GR_INHERIT;
52319+
52320+ if (unlikely(!(gr_status & GR_READY)))
52321+ return (GR_CREATE | GR_LINK);
52322+
52323+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
52324+ oldmode = obj->mode;
52325+
52326+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
52327+ newmode = obj->mode;
52328+
52329+ needmode = newmode & checkmodes;
52330+
52331+ // old name for hardlink must have at least the permissions of the new name
52332+ if ((oldmode & needmode) != needmode)
52333+ goto bad;
52334+
52335+ // if old name had restrictions/auditing, make sure the new name does as well
52336+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
52337+
52338+ // don't allow hardlinking of suid/sgid files without permission
52339+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52340+ needmode |= GR_SETID;
52341+
52342+ if ((newmode & needmode) != needmode)
52343+ goto bad;
52344+
52345+ // enforce minimum permissions
52346+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
52347+ return newmode;
52348+bad:
52349+ needmode = oldmode;
52350+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52351+ needmode |= GR_SETID;
52352+
52353+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
52354+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
52355+ return (GR_CREATE | GR_LINK);
52356+ } else if (newmode & GR_SUPPRESS)
52357+ return GR_SUPPRESS;
52358+ else
52359+ return 0;
52360+}
52361+
52362+int
52363+gr_check_hidden_task(const struct task_struct *task)
52364+{
52365+ if (unlikely(!(gr_status & GR_READY)))
52366+ return 0;
52367+
52368+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
52369+ return 1;
52370+
52371+ return 0;
52372+}
52373+
52374+int
52375+gr_check_protected_task(const struct task_struct *task)
52376+{
52377+ if (unlikely(!(gr_status & GR_READY) || !task))
52378+ return 0;
52379+
52380+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52381+ task->acl != current->acl)
52382+ return 1;
52383+
52384+ return 0;
52385+}
52386+
52387+int
52388+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52389+{
52390+ struct task_struct *p;
52391+ int ret = 0;
52392+
52393+ if (unlikely(!(gr_status & GR_READY) || !pid))
52394+ return ret;
52395+
52396+ read_lock(&tasklist_lock);
52397+ do_each_pid_task(pid, type, p) {
52398+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52399+ p->acl != current->acl) {
52400+ ret = 1;
52401+ goto out;
52402+ }
52403+ } while_each_pid_task(pid, type, p);
52404+out:
52405+ read_unlock(&tasklist_lock);
52406+
52407+ return ret;
52408+}
52409+
52410+void
52411+gr_copy_label(struct task_struct *tsk)
52412+{
52413+ /* plain copying of fields is already done by dup_task_struct */
52414+ tsk->signal->used_accept = 0;
52415+ tsk->acl_sp_role = 0;
52416+ //tsk->acl_role_id = current->acl_role_id;
52417+ //tsk->acl = current->acl;
52418+ //tsk->role = current->role;
52419+ tsk->signal->curr_ip = current->signal->curr_ip;
52420+ tsk->signal->saved_ip = current->signal->saved_ip;
52421+ if (current->exec_file)
52422+ get_file(current->exec_file);
52423+ //tsk->exec_file = current->exec_file;
52424+ //tsk->is_writable = current->is_writable;
52425+ if (unlikely(current->signal->used_accept)) {
52426+ current->signal->curr_ip = 0;
52427+ current->signal->saved_ip = 0;
52428+ }
52429+
52430+ return;
52431+}
52432+
52433+static void
52434+gr_set_proc_res(struct task_struct *task)
52435+{
52436+ struct acl_subject_label *proc;
52437+ unsigned short i;
52438+
52439+ proc = task->acl;
52440+
52441+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
52442+ return;
52443+
52444+ for (i = 0; i < RLIM_NLIMITS; i++) {
52445+ if (!(proc->resmask & (1 << i)))
52446+ continue;
52447+
52448+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
52449+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
52450+ }
52451+
52452+ return;
52453+}
52454+
52455+extern int __gr_process_user_ban(struct user_struct *user);
52456+
52457+int
52458+gr_check_user_change(int real, int effective, int fs)
52459+{
52460+ unsigned int i;
52461+ __u16 num;
52462+ uid_t *uidlist;
52463+ int curuid;
52464+ int realok = 0;
52465+ int effectiveok = 0;
52466+ int fsok = 0;
52467+
52468+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52469+ struct user_struct *user;
52470+
52471+ if (real == -1)
52472+ goto skipit;
52473+
52474+ user = find_user(real);
52475+ if (user == NULL)
52476+ goto skipit;
52477+
52478+ if (__gr_process_user_ban(user)) {
52479+ /* for find_user */
52480+ free_uid(user);
52481+ return 1;
52482+ }
52483+
52484+ /* for find_user */
52485+ free_uid(user);
52486+
52487+skipit:
52488+#endif
52489+
52490+ if (unlikely(!(gr_status & GR_READY)))
52491+ return 0;
52492+
52493+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52494+ gr_log_learn_id_change('u', real, effective, fs);
52495+
52496+ num = current->acl->user_trans_num;
52497+ uidlist = current->acl->user_transitions;
52498+
52499+ if (uidlist == NULL)
52500+ return 0;
52501+
52502+ if (real == -1)
52503+ realok = 1;
52504+ if (effective == -1)
52505+ effectiveok = 1;
52506+ if (fs == -1)
52507+ fsok = 1;
52508+
52509+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
52510+ for (i = 0; i < num; i++) {
52511+ curuid = (int)uidlist[i];
52512+ if (real == curuid)
52513+ realok = 1;
52514+ if (effective == curuid)
52515+ effectiveok = 1;
52516+ if (fs == curuid)
52517+ fsok = 1;
52518+ }
52519+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
52520+ for (i = 0; i < num; i++) {
52521+ curuid = (int)uidlist[i];
52522+ if (real == curuid)
52523+ break;
52524+ if (effective == curuid)
52525+ break;
52526+ if (fs == curuid)
52527+ break;
52528+ }
52529+ /* not in deny list */
52530+ if (i == num) {
52531+ realok = 1;
52532+ effectiveok = 1;
52533+ fsok = 1;
52534+ }
52535+ }
52536+
52537+ if (realok && effectiveok && fsok)
52538+ return 0;
52539+ else {
52540+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52541+ return 1;
52542+ }
52543+}
52544+
52545+int
52546+gr_check_group_change(int real, int effective, int fs)
52547+{
52548+ unsigned int i;
52549+ __u16 num;
52550+ gid_t *gidlist;
52551+ int curgid;
52552+ int realok = 0;
52553+ int effectiveok = 0;
52554+ int fsok = 0;
52555+
52556+ if (unlikely(!(gr_status & GR_READY)))
52557+ return 0;
52558+
52559+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52560+ gr_log_learn_id_change('g', real, effective, fs);
52561+
52562+ num = current->acl->group_trans_num;
52563+ gidlist = current->acl->group_transitions;
52564+
52565+ if (gidlist == NULL)
52566+ return 0;
52567+
52568+ if (real == -1)
52569+ realok = 1;
52570+ if (effective == -1)
52571+ effectiveok = 1;
52572+ if (fs == -1)
52573+ fsok = 1;
52574+
52575+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
52576+ for (i = 0; i < num; i++) {
52577+ curgid = (int)gidlist[i];
52578+ if (real == curgid)
52579+ realok = 1;
52580+ if (effective == curgid)
52581+ effectiveok = 1;
52582+ if (fs == curgid)
52583+ fsok = 1;
52584+ }
52585+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
52586+ for (i = 0; i < num; i++) {
52587+ curgid = (int)gidlist[i];
52588+ if (real == curgid)
52589+ break;
52590+ if (effective == curgid)
52591+ break;
52592+ if (fs == curgid)
52593+ break;
52594+ }
52595+ /* not in deny list */
52596+ if (i == num) {
52597+ realok = 1;
52598+ effectiveok = 1;
52599+ fsok = 1;
52600+ }
52601+ }
52602+
52603+ if (realok && effectiveok && fsok)
52604+ return 0;
52605+ else {
52606+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52607+ return 1;
52608+ }
52609+}
52610+
52611+extern int gr_acl_is_capable(const int cap);
52612+
52613+void
52614+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
52615+{
52616+ struct acl_role_label *role = task->role;
52617+ struct acl_subject_label *subj = NULL;
52618+ struct acl_object_label *obj;
52619+ struct file *filp;
52620+
52621+ if (unlikely(!(gr_status & GR_READY)))
52622+ return;
52623+
52624+ filp = task->exec_file;
52625+
52626+ /* kernel process, we'll give them the kernel role */
52627+ if (unlikely(!filp)) {
52628+ task->role = kernel_role;
52629+ task->acl = kernel_role->root_label;
52630+ return;
52631+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
52632+ role = lookup_acl_role_label(task, uid, gid);
52633+
52634+ /* don't change the role if we're not a privileged process */
52635+ if (role && task->role != role &&
52636+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
52637+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
52638+ return;
52639+
52640+ /* perform subject lookup in possibly new role
52641+ we can use this result below in the case where role == task->role
52642+ */
52643+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
52644+
52645+ /* if we changed uid/gid, but result in the same role
52646+ and are using inheritance, don't lose the inherited subject
52647+ if current subject is other than what normal lookup
52648+ would result in, we arrived via inheritance, don't
52649+ lose subject
52650+ */
52651+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
52652+ (subj == task->acl)))
52653+ task->acl = subj;
52654+
52655+ task->role = role;
52656+
52657+ task->is_writable = 0;
52658+
52659+ /* ignore additional mmap checks for processes that are writable
52660+ by the default ACL */
52661+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52662+ if (unlikely(obj->mode & GR_WRITE))
52663+ task->is_writable = 1;
52664+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
52665+ if (unlikely(obj->mode & GR_WRITE))
52666+ task->is_writable = 1;
52667+
52668+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52669+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52670+#endif
52671+
52672+ gr_set_proc_res(task);
52673+
52674+ return;
52675+}
52676+
52677+int
52678+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
52679+ const int unsafe_flags)
52680+{
52681+ struct task_struct *task = current;
52682+ struct acl_subject_label *newacl;
52683+ struct acl_object_label *obj;
52684+ __u32 retmode;
52685+
52686+ if (unlikely(!(gr_status & GR_READY)))
52687+ return 0;
52688+
52689+ newacl = chk_subj_label(dentry, mnt, task->role);
52690+
52691+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
52692+ did an exec
52693+ */
52694+ rcu_read_lock();
52695+ read_lock(&tasklist_lock);
52696+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
52697+ (task->parent->acl->mode & GR_POVERRIDE))) {
52698+ read_unlock(&tasklist_lock);
52699+ rcu_read_unlock();
52700+ goto skip_check;
52701+ }
52702+ read_unlock(&tasklist_lock);
52703+ rcu_read_unlock();
52704+
52705+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
52706+ !(task->role->roletype & GR_ROLE_GOD) &&
52707+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
52708+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52709+ if (unsafe_flags & LSM_UNSAFE_SHARE)
52710+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
52711+ else
52712+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
52713+ return -EACCES;
52714+ }
52715+
52716+skip_check:
52717+
52718+ obj = chk_obj_label(dentry, mnt, task->acl);
52719+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
52720+
52721+ if (!(task->acl->mode & GR_INHERITLEARN) &&
52722+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
52723+ if (obj->nested)
52724+ task->acl = obj->nested;
52725+ else
52726+ task->acl = newacl;
52727+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
52728+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
52729+
52730+ task->is_writable = 0;
52731+
52732+ /* ignore additional mmap checks for processes that are writable
52733+ by the default ACL */
52734+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
52735+ if (unlikely(obj->mode & GR_WRITE))
52736+ task->is_writable = 1;
52737+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
52738+ if (unlikely(obj->mode & GR_WRITE))
52739+ task->is_writable = 1;
52740+
52741+ gr_set_proc_res(task);
52742+
52743+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52744+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52745+#endif
52746+ return 0;
52747+}
52748+
52749+/* always called with valid inodev ptr */
52750+static void
52751+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
52752+{
52753+ struct acl_object_label *matchpo;
52754+ struct acl_subject_label *matchps;
52755+ struct acl_subject_label *subj;
52756+ struct acl_role_label *role;
52757+ unsigned int x;
52758+
52759+ FOR_EACH_ROLE_START(role)
52760+ FOR_EACH_SUBJECT_START(role, subj, x)
52761+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
52762+ matchpo->mode |= GR_DELETED;
52763+ FOR_EACH_SUBJECT_END(subj,x)
52764+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
52765+ if (subj->inode == ino && subj->device == dev)
52766+ subj->mode |= GR_DELETED;
52767+ FOR_EACH_NESTED_SUBJECT_END(subj)
52768+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
52769+ matchps->mode |= GR_DELETED;
52770+ FOR_EACH_ROLE_END(role)
52771+
52772+ inodev->nentry->deleted = 1;
52773+
52774+ return;
52775+}
52776+
52777+void
52778+gr_handle_delete(const ino_t ino, const dev_t dev)
52779+{
52780+ struct inodev_entry *inodev;
52781+
52782+ if (unlikely(!(gr_status & GR_READY)))
52783+ return;
52784+
52785+ write_lock(&gr_inode_lock);
52786+ inodev = lookup_inodev_entry(ino, dev);
52787+ if (inodev != NULL)
52788+ do_handle_delete(inodev, ino, dev);
52789+ write_unlock(&gr_inode_lock);
52790+
52791+ return;
52792+}
52793+
52794+static void
52795+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
52796+ const ino_t newinode, const dev_t newdevice,
52797+ struct acl_subject_label *subj)
52798+{
52799+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
52800+ struct acl_object_label *match;
52801+
52802+ match = subj->obj_hash[index];
52803+
52804+ while (match && (match->inode != oldinode ||
52805+ match->device != olddevice ||
52806+ !(match->mode & GR_DELETED)))
52807+ match = match->next;
52808+
52809+ if (match && (match->inode == oldinode)
52810+ && (match->device == olddevice)
52811+ && (match->mode & GR_DELETED)) {
52812+ if (match->prev == NULL) {
52813+ subj->obj_hash[index] = match->next;
52814+ if (match->next != NULL)
52815+ match->next->prev = NULL;
52816+ } else {
52817+ match->prev->next = match->next;
52818+ if (match->next != NULL)
52819+ match->next->prev = match->prev;
52820+ }
52821+ match->prev = NULL;
52822+ match->next = NULL;
52823+ match->inode = newinode;
52824+ match->device = newdevice;
52825+ match->mode &= ~GR_DELETED;
52826+
52827+ insert_acl_obj_label(match, subj);
52828+ }
52829+
52830+ return;
52831+}
52832+
52833+static void
52834+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
52835+ const ino_t newinode, const dev_t newdevice,
52836+ struct acl_role_label *role)
52837+{
52838+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
52839+ struct acl_subject_label *match;
52840+
52841+ match = role->subj_hash[index];
52842+
52843+ while (match && (match->inode != oldinode ||
52844+ match->device != olddevice ||
52845+ !(match->mode & GR_DELETED)))
52846+ match = match->next;
52847+
52848+ if (match && (match->inode == oldinode)
52849+ && (match->device == olddevice)
52850+ && (match->mode & GR_DELETED)) {
52851+ if (match->prev == NULL) {
52852+ role->subj_hash[index] = match->next;
52853+ if (match->next != NULL)
52854+ match->next->prev = NULL;
52855+ } else {
52856+ match->prev->next = match->next;
52857+ if (match->next != NULL)
52858+ match->next->prev = match->prev;
52859+ }
52860+ match->prev = NULL;
52861+ match->next = NULL;
52862+ match->inode = newinode;
52863+ match->device = newdevice;
52864+ match->mode &= ~GR_DELETED;
52865+
52866+ insert_acl_subj_label(match, role);
52867+ }
52868+
52869+ return;
52870+}
52871+
52872+static void
52873+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
52874+ const ino_t newinode, const dev_t newdevice)
52875+{
52876+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
52877+ struct inodev_entry *match;
52878+
52879+ match = inodev_set.i_hash[index];
52880+
52881+ while (match && (match->nentry->inode != oldinode ||
52882+ match->nentry->device != olddevice || !match->nentry->deleted))
52883+ match = match->next;
52884+
52885+ if (match && (match->nentry->inode == oldinode)
52886+ && (match->nentry->device == olddevice) &&
52887+ match->nentry->deleted) {
52888+ if (match->prev == NULL) {
52889+ inodev_set.i_hash[index] = match->next;
52890+ if (match->next != NULL)
52891+ match->next->prev = NULL;
52892+ } else {
52893+ match->prev->next = match->next;
52894+ if (match->next != NULL)
52895+ match->next->prev = match->prev;
52896+ }
52897+ match->prev = NULL;
52898+ match->next = NULL;
52899+ match->nentry->inode = newinode;
52900+ match->nentry->device = newdevice;
52901+ match->nentry->deleted = 0;
52902+
52903+ insert_inodev_entry(match);
52904+ }
52905+
52906+ return;
52907+}
52908+
52909+static void
52910+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
52911+{
52912+ struct acl_subject_label *subj;
52913+ struct acl_role_label *role;
52914+ unsigned int x;
52915+
52916+ FOR_EACH_ROLE_START(role)
52917+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
52918+
52919+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
52920+ if ((subj->inode == ino) && (subj->device == dev)) {
52921+ subj->inode = ino;
52922+ subj->device = dev;
52923+ }
52924+ FOR_EACH_NESTED_SUBJECT_END(subj)
52925+ FOR_EACH_SUBJECT_START(role, subj, x)
52926+ update_acl_obj_label(matchn->inode, matchn->device,
52927+ ino, dev, subj);
52928+ FOR_EACH_SUBJECT_END(subj,x)
52929+ FOR_EACH_ROLE_END(role)
52930+
52931+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
52932+
52933+ return;
52934+}
52935+
52936+static void
52937+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
52938+ const struct vfsmount *mnt)
52939+{
52940+ ino_t ino = dentry->d_inode->i_ino;
52941+ dev_t dev = __get_dev(dentry);
52942+
52943+ __do_handle_create(matchn, ino, dev);
52944+
52945+ return;
52946+}
52947+
52948+void
52949+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
52950+{
52951+ struct name_entry *matchn;
52952+
52953+ if (unlikely(!(gr_status & GR_READY)))
52954+ return;
52955+
52956+ preempt_disable();
52957+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
52958+
52959+ if (unlikely((unsigned long)matchn)) {
52960+ write_lock(&gr_inode_lock);
52961+ do_handle_create(matchn, dentry, mnt);
52962+ write_unlock(&gr_inode_lock);
52963+ }
52964+ preempt_enable();
52965+
52966+ return;
52967+}
52968+
52969+void
52970+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
52971+{
52972+ struct name_entry *matchn;
52973+
52974+ if (unlikely(!(gr_status & GR_READY)))
52975+ return;
52976+
52977+ preempt_disable();
52978+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
52979+
52980+ if (unlikely((unsigned long)matchn)) {
52981+ write_lock(&gr_inode_lock);
52982+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
52983+ write_unlock(&gr_inode_lock);
52984+ }
52985+ preempt_enable();
52986+
52987+ return;
52988+}
52989+
52990+void
52991+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
52992+ struct dentry *old_dentry,
52993+ struct dentry *new_dentry,
52994+ struct vfsmount *mnt, const __u8 replace)
52995+{
52996+ struct name_entry *matchn;
52997+ struct inodev_entry *inodev;
52998+ struct inode *inode = new_dentry->d_inode;
52999+ ino_t old_ino = old_dentry->d_inode->i_ino;
53000+ dev_t old_dev = __get_dev(old_dentry);
53001+
53002+ /* vfs_rename swaps the name and parent link for old_dentry and
53003+ new_dentry
53004+ at this point, old_dentry has the new name, parent link, and inode
53005+ for the renamed file
53006+ if a file is being replaced by a rename, new_dentry has the inode
53007+ and name for the replaced file
53008+ */
53009+
53010+ if (unlikely(!(gr_status & GR_READY)))
53011+ return;
53012+
53013+ preempt_disable();
53014+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
53015+
53016+ /* we wouldn't have to check d_inode if it weren't for
53017+ NFS silly-renaming
53018+ */
53019+
53020+ write_lock(&gr_inode_lock);
53021+ if (unlikely(replace && inode)) {
53022+ ino_t new_ino = inode->i_ino;
53023+ dev_t new_dev = __get_dev(new_dentry);
53024+
53025+ inodev = lookup_inodev_entry(new_ino, new_dev);
53026+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
53027+ do_handle_delete(inodev, new_ino, new_dev);
53028+ }
53029+
53030+ inodev = lookup_inodev_entry(old_ino, old_dev);
53031+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
53032+ do_handle_delete(inodev, old_ino, old_dev);
53033+
53034+ if (unlikely((unsigned long)matchn))
53035+ do_handle_create(matchn, old_dentry, mnt);
53036+
53037+ write_unlock(&gr_inode_lock);
53038+ preempt_enable();
53039+
53040+ return;
53041+}
53042+
53043+static int
53044+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
53045+ unsigned char **sum)
53046+{
53047+ struct acl_role_label *r;
53048+ struct role_allowed_ip *ipp;
53049+ struct role_transition *trans;
53050+ unsigned int i;
53051+ int found = 0;
53052+ u32 curr_ip = current->signal->curr_ip;
53053+
53054+ current->signal->saved_ip = curr_ip;
53055+
53056+ /* check transition table */
53057+
53058+ for (trans = current->role->transitions; trans; trans = trans->next) {
53059+ if (!strcmp(rolename, trans->rolename)) {
53060+ found = 1;
53061+ break;
53062+ }
53063+ }
53064+
53065+ if (!found)
53066+ return 0;
53067+
53068+ /* handle special roles that do not require authentication
53069+ and check ip */
53070+
53071+ FOR_EACH_ROLE_START(r)
53072+ if (!strcmp(rolename, r->rolename) &&
53073+ (r->roletype & GR_ROLE_SPECIAL)) {
53074+ found = 0;
53075+ if (r->allowed_ips != NULL) {
53076+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
53077+ if ((ntohl(curr_ip) & ipp->netmask) ==
53078+ (ntohl(ipp->addr) & ipp->netmask))
53079+ found = 1;
53080+ }
53081+ } else
53082+ found = 2;
53083+ if (!found)
53084+ return 0;
53085+
53086+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
53087+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
53088+ *salt = NULL;
53089+ *sum = NULL;
53090+ return 1;
53091+ }
53092+ }
53093+ FOR_EACH_ROLE_END(r)
53094+
53095+ for (i = 0; i < num_sprole_pws; i++) {
53096+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
53097+ *salt = acl_special_roles[i]->salt;
53098+ *sum = acl_special_roles[i]->sum;
53099+ return 1;
53100+ }
53101+ }
53102+
53103+ return 0;
53104+}
53105+
53106+static void
53107+assign_special_role(char *rolename)
53108+{
53109+ struct acl_object_label *obj;
53110+ struct acl_role_label *r;
53111+ struct acl_role_label *assigned = NULL;
53112+ struct task_struct *tsk;
53113+ struct file *filp;
53114+
53115+ FOR_EACH_ROLE_START(r)
53116+ if (!strcmp(rolename, r->rolename) &&
53117+ (r->roletype & GR_ROLE_SPECIAL)) {
53118+ assigned = r;
53119+ break;
53120+ }
53121+ FOR_EACH_ROLE_END(r)
53122+
53123+ if (!assigned)
53124+ return;
53125+
53126+ read_lock(&tasklist_lock);
53127+ read_lock(&grsec_exec_file_lock);
53128+
53129+ tsk = current->real_parent;
53130+ if (tsk == NULL)
53131+ goto out_unlock;
53132+
53133+ filp = tsk->exec_file;
53134+ if (filp == NULL)
53135+ goto out_unlock;
53136+
53137+ tsk->is_writable = 0;
53138+
53139+ tsk->acl_sp_role = 1;
53140+ tsk->acl_role_id = ++acl_sp_role_value;
53141+ tsk->role = assigned;
53142+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
53143+
53144+ /* ignore additional mmap checks for processes that are writable
53145+ by the default ACL */
53146+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53147+ if (unlikely(obj->mode & GR_WRITE))
53148+ tsk->is_writable = 1;
53149+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
53150+ if (unlikely(obj->mode & GR_WRITE))
53151+ tsk->is_writable = 1;
53152+
53153+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53154+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
53155+#endif
53156+
53157+out_unlock:
53158+ read_unlock(&grsec_exec_file_lock);
53159+ read_unlock(&tasklist_lock);
53160+ return;
53161+}
53162+
53163+int gr_check_secure_terminal(struct task_struct *task)
53164+{
53165+ struct task_struct *p, *p2, *p3;
53166+ struct files_struct *files;
53167+ struct fdtable *fdt;
53168+ struct file *our_file = NULL, *file;
53169+ int i;
53170+
53171+ if (task->signal->tty == NULL)
53172+ return 1;
53173+
53174+ files = get_files_struct(task);
53175+ if (files != NULL) {
53176+ rcu_read_lock();
53177+ fdt = files_fdtable(files);
53178+ for (i=0; i < fdt->max_fds; i++) {
53179+ file = fcheck_files(files, i);
53180+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
53181+ get_file(file);
53182+ our_file = file;
53183+ }
53184+ }
53185+ rcu_read_unlock();
53186+ put_files_struct(files);
53187+ }
53188+
53189+ if (our_file == NULL)
53190+ return 1;
53191+
53192+ read_lock(&tasklist_lock);
53193+ do_each_thread(p2, p) {
53194+ files = get_files_struct(p);
53195+ if (files == NULL ||
53196+ (p->signal && p->signal->tty == task->signal->tty)) {
53197+ if (files != NULL)
53198+ put_files_struct(files);
53199+ continue;
53200+ }
53201+ rcu_read_lock();
53202+ fdt = files_fdtable(files);
53203+ for (i=0; i < fdt->max_fds; i++) {
53204+ file = fcheck_files(files, i);
53205+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
53206+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
53207+ p3 = task;
53208+ while (p3->pid > 0) {
53209+ if (p3 == p)
53210+ break;
53211+ p3 = p3->real_parent;
53212+ }
53213+ if (p3 == p)
53214+ break;
53215+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
53216+ gr_handle_alertkill(p);
53217+ rcu_read_unlock();
53218+ put_files_struct(files);
53219+ read_unlock(&tasklist_lock);
53220+ fput(our_file);
53221+ return 0;
53222+ }
53223+ }
53224+ rcu_read_unlock();
53225+ put_files_struct(files);
53226+ } while_each_thread(p2, p);
53227+ read_unlock(&tasklist_lock);
53228+
53229+ fput(our_file);
53230+ return 1;
53231+}
53232+
53233+ssize_t
53234+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
53235+{
53236+ struct gr_arg_wrapper uwrap;
53237+ unsigned char *sprole_salt = NULL;
53238+ unsigned char *sprole_sum = NULL;
53239+ int error = sizeof (struct gr_arg_wrapper);
53240+ int error2 = 0;
53241+
53242+ mutex_lock(&gr_dev_mutex);
53243+
53244+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
53245+ error = -EPERM;
53246+ goto out;
53247+ }
53248+
53249+ if (count != sizeof (struct gr_arg_wrapper)) {
53250+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
53251+ error = -EINVAL;
53252+ goto out;
53253+ }
53254+
53255+
53256+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
53257+ gr_auth_expires = 0;
53258+ gr_auth_attempts = 0;
53259+ }
53260+
53261+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
53262+ error = -EFAULT;
53263+ goto out;
53264+ }
53265+
53266+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
53267+ error = -EINVAL;
53268+ goto out;
53269+ }
53270+
53271+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
53272+ error = -EFAULT;
53273+ goto out;
53274+ }
53275+
53276+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53277+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53278+ time_after(gr_auth_expires, get_seconds())) {
53279+ error = -EBUSY;
53280+ goto out;
53281+ }
53282+
53283+ /* if non-root trying to do anything other than use a special role,
53284+ do not attempt authentication, do not count towards authentication
53285+ locking
53286+ */
53287+
53288+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
53289+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53290+ current_uid()) {
53291+ error = -EPERM;
53292+ goto out;
53293+ }
53294+
53295+ /* ensure pw and special role name are null terminated */
53296+
53297+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
53298+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
53299+
53300+ /* Okay.
53301+ * We have our enough of the argument structure..(we have yet
53302+ * to copy_from_user the tables themselves) . Copy the tables
53303+ * only if we need them, i.e. for loading operations. */
53304+
53305+ switch (gr_usermode->mode) {
53306+ case GR_STATUS:
53307+ if (gr_status & GR_READY) {
53308+ error = 1;
53309+ if (!gr_check_secure_terminal(current))
53310+ error = 3;
53311+ } else
53312+ error = 2;
53313+ goto out;
53314+ case GR_SHUTDOWN:
53315+ if ((gr_status & GR_READY)
53316+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53317+ pax_open_kernel();
53318+ gr_status &= ~GR_READY;
53319+ pax_close_kernel();
53320+
53321+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
53322+ free_variables();
53323+ memset(gr_usermode, 0, sizeof (struct gr_arg));
53324+ memset(gr_system_salt, 0, GR_SALT_LEN);
53325+ memset(gr_system_sum, 0, GR_SHA_LEN);
53326+ } else if (gr_status & GR_READY) {
53327+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
53328+ error = -EPERM;
53329+ } else {
53330+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
53331+ error = -EAGAIN;
53332+ }
53333+ break;
53334+ case GR_ENABLE:
53335+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
53336+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
53337+ else {
53338+ if (gr_status & GR_READY)
53339+ error = -EAGAIN;
53340+ else
53341+ error = error2;
53342+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
53343+ }
53344+ break;
53345+ case GR_RELOAD:
53346+ if (!(gr_status & GR_READY)) {
53347+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
53348+ error = -EAGAIN;
53349+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53350+ preempt_disable();
53351+
53352+ pax_open_kernel();
53353+ gr_status &= ~GR_READY;
53354+ pax_close_kernel();
53355+
53356+ free_variables();
53357+ if (!(error2 = gracl_init(gr_usermode))) {
53358+ preempt_enable();
53359+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
53360+ } else {
53361+ preempt_enable();
53362+ error = error2;
53363+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53364+ }
53365+ } else {
53366+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53367+ error = -EPERM;
53368+ }
53369+ break;
53370+ case GR_SEGVMOD:
53371+ if (unlikely(!(gr_status & GR_READY))) {
53372+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
53373+ error = -EAGAIN;
53374+ break;
53375+ }
53376+
53377+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53378+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
53379+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
53380+ struct acl_subject_label *segvacl;
53381+ segvacl =
53382+ lookup_acl_subj_label(gr_usermode->segv_inode,
53383+ gr_usermode->segv_device,
53384+ current->role);
53385+ if (segvacl) {
53386+ segvacl->crashes = 0;
53387+ segvacl->expires = 0;
53388+ }
53389+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
53390+ gr_remove_uid(gr_usermode->segv_uid);
53391+ }
53392+ } else {
53393+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
53394+ error = -EPERM;
53395+ }
53396+ break;
53397+ case GR_SPROLE:
53398+ case GR_SPROLEPAM:
53399+ if (unlikely(!(gr_status & GR_READY))) {
53400+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
53401+ error = -EAGAIN;
53402+ break;
53403+ }
53404+
53405+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
53406+ current->role->expires = 0;
53407+ current->role->auth_attempts = 0;
53408+ }
53409+
53410+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53411+ time_after(current->role->expires, get_seconds())) {
53412+ error = -EBUSY;
53413+ goto out;
53414+ }
53415+
53416+ if (lookup_special_role_auth
53417+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
53418+ && ((!sprole_salt && !sprole_sum)
53419+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
53420+ char *p = "";
53421+ assign_special_role(gr_usermode->sp_role);
53422+ read_lock(&tasklist_lock);
53423+ if (current->real_parent)
53424+ p = current->real_parent->role->rolename;
53425+ read_unlock(&tasklist_lock);
53426+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
53427+ p, acl_sp_role_value);
53428+ } else {
53429+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
53430+ error = -EPERM;
53431+ if(!(current->role->auth_attempts++))
53432+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53433+
53434+ goto out;
53435+ }
53436+ break;
53437+ case GR_UNSPROLE:
53438+ if (unlikely(!(gr_status & GR_READY))) {
53439+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
53440+ error = -EAGAIN;
53441+ break;
53442+ }
53443+
53444+ if (current->role->roletype & GR_ROLE_SPECIAL) {
53445+ char *p = "";
53446+ int i = 0;
53447+
53448+ read_lock(&tasklist_lock);
53449+ if (current->real_parent) {
53450+ p = current->real_parent->role->rolename;
53451+ i = current->real_parent->acl_role_id;
53452+ }
53453+ read_unlock(&tasklist_lock);
53454+
53455+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
53456+ gr_set_acls(1);
53457+ } else {
53458+ error = -EPERM;
53459+ goto out;
53460+ }
53461+ break;
53462+ default:
53463+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
53464+ error = -EINVAL;
53465+ break;
53466+ }
53467+
53468+ if (error != -EPERM)
53469+ goto out;
53470+
53471+ if(!(gr_auth_attempts++))
53472+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53473+
53474+ out:
53475+ mutex_unlock(&gr_dev_mutex);
53476+ return error;
53477+}
53478+
53479+/* must be called with
53480+ rcu_read_lock();
53481+ read_lock(&tasklist_lock);
53482+ read_lock(&grsec_exec_file_lock);
53483+*/
53484+int gr_apply_subject_to_task(struct task_struct *task)
53485+{
53486+ struct acl_object_label *obj;
53487+ char *tmpname;
53488+ struct acl_subject_label *tmpsubj;
53489+ struct file *filp;
53490+ struct name_entry *nmatch;
53491+
53492+ filp = task->exec_file;
53493+ if (filp == NULL)
53494+ return 0;
53495+
53496+ /* the following is to apply the correct subject
53497+ on binaries running when the RBAC system
53498+ is enabled, when the binaries have been
53499+ replaced or deleted since their execution
53500+ -----
53501+ when the RBAC system starts, the inode/dev
53502+ from exec_file will be one the RBAC system
53503+ is unaware of. It only knows the inode/dev
53504+ of the present file on disk, or the absence
53505+ of it.
53506+ */
53507+ preempt_disable();
53508+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
53509+
53510+ nmatch = lookup_name_entry(tmpname);
53511+ preempt_enable();
53512+ tmpsubj = NULL;
53513+ if (nmatch) {
53514+ if (nmatch->deleted)
53515+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
53516+ else
53517+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
53518+ if (tmpsubj != NULL)
53519+ task->acl = tmpsubj;
53520+ }
53521+ if (tmpsubj == NULL)
53522+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
53523+ task->role);
53524+ if (task->acl) {
53525+ task->is_writable = 0;
53526+ /* ignore additional mmap checks for processes that are writable
53527+ by the default ACL */
53528+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53529+ if (unlikely(obj->mode & GR_WRITE))
53530+ task->is_writable = 1;
53531+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53532+ if (unlikely(obj->mode & GR_WRITE))
53533+ task->is_writable = 1;
53534+
53535+ gr_set_proc_res(task);
53536+
53537+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53538+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53539+#endif
53540+ } else {
53541+ return 1;
53542+ }
53543+
53544+ return 0;
53545+}
53546+
53547+int
53548+gr_set_acls(const int type)
53549+{
53550+ struct task_struct *task, *task2;
53551+ struct acl_role_label *role = current->role;
53552+ __u16 acl_role_id = current->acl_role_id;
53553+ const struct cred *cred;
53554+ int ret;
53555+
53556+ rcu_read_lock();
53557+ read_lock(&tasklist_lock);
53558+ read_lock(&grsec_exec_file_lock);
53559+ do_each_thread(task2, task) {
53560+ /* check to see if we're called from the exit handler,
53561+ if so, only replace ACLs that have inherited the admin
53562+ ACL */
53563+
53564+ if (type && (task->role != role ||
53565+ task->acl_role_id != acl_role_id))
53566+ continue;
53567+
53568+ task->acl_role_id = 0;
53569+ task->acl_sp_role = 0;
53570+
53571+ if (task->exec_file) {
53572+ cred = __task_cred(task);
53573+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
53574+ ret = gr_apply_subject_to_task(task);
53575+ if (ret) {
53576+ read_unlock(&grsec_exec_file_lock);
53577+ read_unlock(&tasklist_lock);
53578+ rcu_read_unlock();
53579+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
53580+ return ret;
53581+ }
53582+ } else {
53583+ // it's a kernel process
53584+ task->role = kernel_role;
53585+ task->acl = kernel_role->root_label;
53586+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
53587+ task->acl->mode &= ~GR_PROCFIND;
53588+#endif
53589+ }
53590+ } while_each_thread(task2, task);
53591+ read_unlock(&grsec_exec_file_lock);
53592+ read_unlock(&tasklist_lock);
53593+ rcu_read_unlock();
53594+
53595+ return 0;
53596+}
53597+
53598+void
53599+gr_learn_resource(const struct task_struct *task,
53600+ const int res, const unsigned long wanted, const int gt)
53601+{
53602+ struct acl_subject_label *acl;
53603+ const struct cred *cred;
53604+
53605+ if (unlikely((gr_status & GR_READY) &&
53606+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
53607+ goto skip_reslog;
53608+
53609+#ifdef CONFIG_GRKERNSEC_RESLOG
53610+ gr_log_resource(task, res, wanted, gt);
53611+#endif
53612+ skip_reslog:
53613+
53614+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
53615+ return;
53616+
53617+ acl = task->acl;
53618+
53619+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
53620+ !(acl->resmask & (1 << (unsigned short) res))))
53621+ return;
53622+
53623+ if (wanted >= acl->res[res].rlim_cur) {
53624+ unsigned long res_add;
53625+
53626+ res_add = wanted;
53627+ switch (res) {
53628+ case RLIMIT_CPU:
53629+ res_add += GR_RLIM_CPU_BUMP;
53630+ break;
53631+ case RLIMIT_FSIZE:
53632+ res_add += GR_RLIM_FSIZE_BUMP;
53633+ break;
53634+ case RLIMIT_DATA:
53635+ res_add += GR_RLIM_DATA_BUMP;
53636+ break;
53637+ case RLIMIT_STACK:
53638+ res_add += GR_RLIM_STACK_BUMP;
53639+ break;
53640+ case RLIMIT_CORE:
53641+ res_add += GR_RLIM_CORE_BUMP;
53642+ break;
53643+ case RLIMIT_RSS:
53644+ res_add += GR_RLIM_RSS_BUMP;
53645+ break;
53646+ case RLIMIT_NPROC:
53647+ res_add += GR_RLIM_NPROC_BUMP;
53648+ break;
53649+ case RLIMIT_NOFILE:
53650+ res_add += GR_RLIM_NOFILE_BUMP;
53651+ break;
53652+ case RLIMIT_MEMLOCK:
53653+ res_add += GR_RLIM_MEMLOCK_BUMP;
53654+ break;
53655+ case RLIMIT_AS:
53656+ res_add += GR_RLIM_AS_BUMP;
53657+ break;
53658+ case RLIMIT_LOCKS:
53659+ res_add += GR_RLIM_LOCKS_BUMP;
53660+ break;
53661+ case RLIMIT_SIGPENDING:
53662+ res_add += GR_RLIM_SIGPENDING_BUMP;
53663+ break;
53664+ case RLIMIT_MSGQUEUE:
53665+ res_add += GR_RLIM_MSGQUEUE_BUMP;
53666+ break;
53667+ case RLIMIT_NICE:
53668+ res_add += GR_RLIM_NICE_BUMP;
53669+ break;
53670+ case RLIMIT_RTPRIO:
53671+ res_add += GR_RLIM_RTPRIO_BUMP;
53672+ break;
53673+ case RLIMIT_RTTIME:
53674+ res_add += GR_RLIM_RTTIME_BUMP;
53675+ break;
53676+ }
53677+
53678+ acl->res[res].rlim_cur = res_add;
53679+
53680+ if (wanted > acl->res[res].rlim_max)
53681+ acl->res[res].rlim_max = res_add;
53682+
53683+ /* only log the subject filename, since resource logging is supported for
53684+ single-subject learning only */
53685+ rcu_read_lock();
53686+ cred = __task_cred(task);
53687+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
53688+ task->role->roletype, cred->uid, cred->gid, acl->filename,
53689+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
53690+ "", (unsigned long) res, &task->signal->saved_ip);
53691+ rcu_read_unlock();
53692+ }
53693+
53694+ return;
53695+}
53696+
53697+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
53698+void
53699+pax_set_initial_flags(struct linux_binprm *bprm)
53700+{
53701+ struct task_struct *task = current;
53702+ struct acl_subject_label *proc;
53703+ unsigned long flags;
53704+
53705+ if (unlikely(!(gr_status & GR_READY)))
53706+ return;
53707+
53708+ flags = pax_get_flags(task);
53709+
53710+ proc = task->acl;
53711+
53712+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
53713+ flags &= ~MF_PAX_PAGEEXEC;
53714+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
53715+ flags &= ~MF_PAX_SEGMEXEC;
53716+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
53717+ flags &= ~MF_PAX_RANDMMAP;
53718+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
53719+ flags &= ~MF_PAX_EMUTRAMP;
53720+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
53721+ flags &= ~MF_PAX_MPROTECT;
53722+
53723+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
53724+ flags |= MF_PAX_PAGEEXEC;
53725+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
53726+ flags |= MF_PAX_SEGMEXEC;
53727+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
53728+ flags |= MF_PAX_RANDMMAP;
53729+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
53730+ flags |= MF_PAX_EMUTRAMP;
53731+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
53732+ flags |= MF_PAX_MPROTECT;
53733+
53734+ pax_set_flags(task, flags);
53735+
53736+ return;
53737+}
53738+#endif
53739+
53740+int
53741+gr_handle_proc_ptrace(struct task_struct *task)
53742+{
53743+ struct file *filp;
53744+ struct task_struct *tmp = task;
53745+ struct task_struct *curtemp = current;
53746+ __u32 retmode;
53747+
53748+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
53749+ if (unlikely(!(gr_status & GR_READY)))
53750+ return 0;
53751+#endif
53752+
53753+ read_lock(&tasklist_lock);
53754+ read_lock(&grsec_exec_file_lock);
53755+ filp = task->exec_file;
53756+
53757+ while (tmp->pid > 0) {
53758+ if (tmp == curtemp)
53759+ break;
53760+ tmp = tmp->real_parent;
53761+ }
53762+
53763+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
53764+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
53765+ read_unlock(&grsec_exec_file_lock);
53766+ read_unlock(&tasklist_lock);
53767+ return 1;
53768+ }
53769+
53770+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53771+ if (!(gr_status & GR_READY)) {
53772+ read_unlock(&grsec_exec_file_lock);
53773+ read_unlock(&tasklist_lock);
53774+ return 0;
53775+ }
53776+#endif
53777+
53778+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
53779+ read_unlock(&grsec_exec_file_lock);
53780+ read_unlock(&tasklist_lock);
53781+
53782+ if (retmode & GR_NOPTRACE)
53783+ return 1;
53784+
53785+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
53786+ && (current->acl != task->acl || (current->acl != current->role->root_label
53787+ && current->pid != task->pid)))
53788+ return 1;
53789+
53790+ return 0;
53791+}
53792+
53793+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
53794+{
53795+ if (unlikely(!(gr_status & GR_READY)))
53796+ return;
53797+
53798+ if (!(current->role->roletype & GR_ROLE_GOD))
53799+ return;
53800+
53801+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
53802+ p->role->rolename, gr_task_roletype_to_char(p),
53803+ p->acl->filename);
53804+}
53805+
53806+int
53807+gr_handle_ptrace(struct task_struct *task, const long request)
53808+{
53809+ struct task_struct *tmp = task;
53810+ struct task_struct *curtemp = current;
53811+ __u32 retmode;
53812+
53813+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
53814+ if (unlikely(!(gr_status & GR_READY)))
53815+ return 0;
53816+#endif
53817+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
53818+ read_lock(&tasklist_lock);
53819+ while (tmp->pid > 0) {
53820+ if (tmp == curtemp)
53821+ break;
53822+ tmp = tmp->real_parent;
53823+ }
53824+
53825+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
53826+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
53827+ read_unlock(&tasklist_lock);
53828+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53829+ return 1;
53830+ }
53831+ read_unlock(&tasklist_lock);
53832+ }
53833+
53834+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53835+ if (!(gr_status & GR_READY))
53836+ return 0;
53837+#endif
53838+
53839+ read_lock(&grsec_exec_file_lock);
53840+ if (unlikely(!task->exec_file)) {
53841+ read_unlock(&grsec_exec_file_lock);
53842+ return 0;
53843+ }
53844+
53845+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
53846+ read_unlock(&grsec_exec_file_lock);
53847+
53848+ if (retmode & GR_NOPTRACE) {
53849+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53850+ return 1;
53851+ }
53852+
53853+ if (retmode & GR_PTRACERD) {
53854+ switch (request) {
53855+ case PTRACE_SEIZE:
53856+ case PTRACE_POKETEXT:
53857+ case PTRACE_POKEDATA:
53858+ case PTRACE_POKEUSR:
53859+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
53860+ case PTRACE_SETREGS:
53861+ case PTRACE_SETFPREGS:
53862+#endif
53863+#ifdef CONFIG_X86
53864+ case PTRACE_SETFPXREGS:
53865+#endif
53866+#ifdef CONFIG_ALTIVEC
53867+ case PTRACE_SETVRREGS:
53868+#endif
53869+ return 1;
53870+ default:
53871+ return 0;
53872+ }
53873+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
53874+ !(current->role->roletype & GR_ROLE_GOD) &&
53875+ (current->acl != task->acl)) {
53876+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53877+ return 1;
53878+ }
53879+
53880+ return 0;
53881+}
53882+
53883+static int is_writable_mmap(const struct file *filp)
53884+{
53885+ struct task_struct *task = current;
53886+ struct acl_object_label *obj, *obj2;
53887+
53888+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
53889+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
53890+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53891+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
53892+ task->role->root_label);
53893+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
53894+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
53895+ return 1;
53896+ }
53897+ }
53898+ return 0;
53899+}
53900+
53901+int
53902+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
53903+{
53904+ __u32 mode;
53905+
53906+ if (unlikely(!file || !(prot & PROT_EXEC)))
53907+ return 1;
53908+
53909+ if (is_writable_mmap(file))
53910+ return 0;
53911+
53912+ mode =
53913+ gr_search_file(file->f_path.dentry,
53914+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
53915+ file->f_path.mnt);
53916+
53917+ if (!gr_tpe_allow(file))
53918+ return 0;
53919+
53920+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
53921+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53922+ return 0;
53923+ } else if (unlikely(!(mode & GR_EXEC))) {
53924+ return 0;
53925+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
53926+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53927+ return 1;
53928+ }
53929+
53930+ return 1;
53931+}
53932+
53933+int
53934+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53935+{
53936+ __u32 mode;
53937+
53938+ if (unlikely(!file || !(prot & PROT_EXEC)))
53939+ return 1;
53940+
53941+ if (is_writable_mmap(file))
53942+ return 0;
53943+
53944+ mode =
53945+ gr_search_file(file->f_path.dentry,
53946+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
53947+ file->f_path.mnt);
53948+
53949+ if (!gr_tpe_allow(file))
53950+ return 0;
53951+
53952+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
53953+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53954+ return 0;
53955+ } else if (unlikely(!(mode & GR_EXEC))) {
53956+ return 0;
53957+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
53958+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53959+ return 1;
53960+ }
53961+
53962+ return 1;
53963+}
53964+
53965+void
53966+gr_acl_handle_psacct(struct task_struct *task, const long code)
53967+{
53968+ unsigned long runtime;
53969+ unsigned long cputime;
53970+ unsigned int wday, cday;
53971+ __u8 whr, chr;
53972+ __u8 wmin, cmin;
53973+ __u8 wsec, csec;
53974+ struct timespec timeval;
53975+
53976+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
53977+ !(task->acl->mode & GR_PROCACCT)))
53978+ return;
53979+
53980+ do_posix_clock_monotonic_gettime(&timeval);
53981+ runtime = timeval.tv_sec - task->start_time.tv_sec;
53982+ wday = runtime / (3600 * 24);
53983+ runtime -= wday * (3600 * 24);
53984+ whr = runtime / 3600;
53985+ runtime -= whr * 3600;
53986+ wmin = runtime / 60;
53987+ runtime -= wmin * 60;
53988+ wsec = runtime;
53989+
53990+ cputime = (task->utime + task->stime) / HZ;
53991+ cday = cputime / (3600 * 24);
53992+ cputime -= cday * (3600 * 24);
53993+ chr = cputime / 3600;
53994+ cputime -= chr * 3600;
53995+ cmin = cputime / 60;
53996+ cputime -= cmin * 60;
53997+ csec = cputime;
53998+
53999+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
54000+
54001+ return;
54002+}
54003+
54004+void gr_set_kernel_label(struct task_struct *task)
54005+{
54006+ if (gr_status & GR_READY) {
54007+ task->role = kernel_role;
54008+ task->acl = kernel_role->root_label;
54009+ }
54010+ return;
54011+}
54012+
54013+#ifdef CONFIG_TASKSTATS
54014+int gr_is_taskstats_denied(int pid)
54015+{
54016+ struct task_struct *task;
54017+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54018+ const struct cred *cred;
54019+#endif
54020+ int ret = 0;
54021+
54022+ /* restrict taskstats viewing to un-chrooted root users
54023+ who have the 'view' subject flag if the RBAC system is enabled
54024+ */
54025+
54026+ rcu_read_lock();
54027+ read_lock(&tasklist_lock);
54028+ task = find_task_by_vpid(pid);
54029+ if (task) {
54030+#ifdef CONFIG_GRKERNSEC_CHROOT
54031+ if (proc_is_chrooted(task))
54032+ ret = -EACCES;
54033+#endif
54034+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54035+ cred = __task_cred(task);
54036+#ifdef CONFIG_GRKERNSEC_PROC_USER
54037+ if (cred->uid != 0)
54038+ ret = -EACCES;
54039+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54040+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
54041+ ret = -EACCES;
54042+#endif
54043+#endif
54044+ if (gr_status & GR_READY) {
54045+ if (!(task->acl->mode & GR_VIEW))
54046+ ret = -EACCES;
54047+ }
54048+ } else
54049+ ret = -ENOENT;
54050+
54051+ read_unlock(&tasklist_lock);
54052+ rcu_read_unlock();
54053+
54054+ return ret;
54055+}
54056+#endif
54057+
54058+/* AUXV entries are filled via a descendant of search_binary_handler
54059+ after we've already applied the subject for the target
54060+*/
54061+int gr_acl_enable_at_secure(void)
54062+{
54063+ if (unlikely(!(gr_status & GR_READY)))
54064+ return 0;
54065+
54066+ if (current->acl->mode & GR_ATSECURE)
54067+ return 1;
54068+
54069+ return 0;
54070+}
54071+
54072+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
54073+{
54074+ struct task_struct *task = current;
54075+ struct dentry *dentry = file->f_path.dentry;
54076+ struct vfsmount *mnt = file->f_path.mnt;
54077+ struct acl_object_label *obj, *tmp;
54078+ struct acl_subject_label *subj;
54079+ unsigned int bufsize;
54080+ int is_not_root;
54081+ char *path;
54082+ dev_t dev = __get_dev(dentry);
54083+
54084+ if (unlikely(!(gr_status & GR_READY)))
54085+ return 1;
54086+
54087+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
54088+ return 1;
54089+
54090+ /* ignore Eric Biederman */
54091+ if (IS_PRIVATE(dentry->d_inode))
54092+ return 1;
54093+
54094+ subj = task->acl;
54095+ do {
54096+ obj = lookup_acl_obj_label(ino, dev, subj);
54097+ if (obj != NULL)
54098+ return (obj->mode & GR_FIND) ? 1 : 0;
54099+ } while ((subj = subj->parent_subject));
54100+
54101+ /* this is purely an optimization since we're looking for an object
54102+ for the directory we're doing a readdir on
54103+ if it's possible for any globbed object to match the entry we're
54104+ filling into the directory, then the object we find here will be
54105+ an anchor point with attached globbed objects
54106+ */
54107+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
54108+ if (obj->globbed == NULL)
54109+ return (obj->mode & GR_FIND) ? 1 : 0;
54110+
54111+ is_not_root = ((obj->filename[0] == '/') &&
54112+ (obj->filename[1] == '\0')) ? 0 : 1;
54113+ bufsize = PAGE_SIZE - namelen - is_not_root;
54114+
54115+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
54116+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
54117+ return 1;
54118+
54119+ preempt_disable();
54120+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
54121+ bufsize);
54122+
54123+ bufsize = strlen(path);
54124+
54125+ /* if base is "/", don't append an additional slash */
54126+ if (is_not_root)
54127+ *(path + bufsize) = '/';
54128+ memcpy(path + bufsize + is_not_root, name, namelen);
54129+ *(path + bufsize + namelen + is_not_root) = '\0';
54130+
54131+ tmp = obj->globbed;
54132+ while (tmp) {
54133+ if (!glob_match(tmp->filename, path)) {
54134+ preempt_enable();
54135+ return (tmp->mode & GR_FIND) ? 1 : 0;
54136+ }
54137+ tmp = tmp->next;
54138+ }
54139+ preempt_enable();
54140+ return (obj->mode & GR_FIND) ? 1 : 0;
54141+}
54142+
54143+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
54144+EXPORT_SYMBOL(gr_acl_is_enabled);
54145+#endif
54146+EXPORT_SYMBOL(gr_learn_resource);
54147+EXPORT_SYMBOL(gr_set_kernel_label);
54148+#ifdef CONFIG_SECURITY
54149+EXPORT_SYMBOL(gr_check_user_change);
54150+EXPORT_SYMBOL(gr_check_group_change);
54151+#endif
54152+
54153diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
54154new file mode 100644
54155index 0000000..34fefda
54156--- /dev/null
54157+++ b/grsecurity/gracl_alloc.c
54158@@ -0,0 +1,105 @@
54159+#include <linux/kernel.h>
54160+#include <linux/mm.h>
54161+#include <linux/slab.h>
54162+#include <linux/vmalloc.h>
54163+#include <linux/gracl.h>
54164+#include <linux/grsecurity.h>
54165+
54166+static unsigned long alloc_stack_next = 1;
54167+static unsigned long alloc_stack_size = 1;
54168+static void **alloc_stack;
54169+
54170+static __inline__ int
54171+alloc_pop(void)
54172+{
54173+ if (alloc_stack_next == 1)
54174+ return 0;
54175+
54176+ kfree(alloc_stack[alloc_stack_next - 2]);
54177+
54178+ alloc_stack_next--;
54179+
54180+ return 1;
54181+}
54182+
54183+static __inline__ int
54184+alloc_push(void *buf)
54185+{
54186+ if (alloc_stack_next >= alloc_stack_size)
54187+ return 1;
54188+
54189+ alloc_stack[alloc_stack_next - 1] = buf;
54190+
54191+ alloc_stack_next++;
54192+
54193+ return 0;
54194+}
54195+
54196+void *
54197+acl_alloc(unsigned long len)
54198+{
54199+ void *ret = NULL;
54200+
54201+ if (!len || len > PAGE_SIZE)
54202+ goto out;
54203+
54204+ ret = kmalloc(len, GFP_KERNEL);
54205+
54206+ if (ret) {
54207+ if (alloc_push(ret)) {
54208+ kfree(ret);
54209+ ret = NULL;
54210+ }
54211+ }
54212+
54213+out:
54214+ return ret;
54215+}
54216+
54217+void *
54218+acl_alloc_num(unsigned long num, unsigned long len)
54219+{
54220+ if (!len || (num > (PAGE_SIZE / len)))
54221+ return NULL;
54222+
54223+ return acl_alloc(num * len);
54224+}
54225+
54226+void
54227+acl_free_all(void)
54228+{
54229+ if (gr_acl_is_enabled() || !alloc_stack)
54230+ return;
54231+
54232+ while (alloc_pop()) ;
54233+
54234+ if (alloc_stack) {
54235+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
54236+ kfree(alloc_stack);
54237+ else
54238+ vfree(alloc_stack);
54239+ }
54240+
54241+ alloc_stack = NULL;
54242+ alloc_stack_size = 1;
54243+ alloc_stack_next = 1;
54244+
54245+ return;
54246+}
54247+
54248+int
54249+acl_alloc_stack_init(unsigned long size)
54250+{
54251+ if ((size * sizeof (void *)) <= PAGE_SIZE)
54252+ alloc_stack =
54253+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
54254+ else
54255+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
54256+
54257+ alloc_stack_size = size;
54258+
54259+ if (!alloc_stack)
54260+ return 0;
54261+ else
54262+ return 1;
54263+}
54264diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
54265new file mode 100644
54266index 0000000..6d21049
54267--- /dev/null
54268+++ b/grsecurity/gracl_cap.c
54269@@ -0,0 +1,110 @@
54270+#include <linux/kernel.h>
54271+#include <linux/module.h>
54272+#include <linux/sched.h>
54273+#include <linux/gracl.h>
54274+#include <linux/grsecurity.h>
54275+#include <linux/grinternal.h>
54276+
54277+extern const char *captab_log[];
54278+extern int captab_log_entries;
54279+
54280+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
54281+{
54282+ struct acl_subject_label *curracl;
54283+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54284+ kernel_cap_t cap_audit = __cap_empty_set;
54285+
54286+ if (!gr_acl_is_enabled())
54287+ return 1;
54288+
54289+ curracl = task->acl;
54290+
54291+ cap_drop = curracl->cap_lower;
54292+ cap_mask = curracl->cap_mask;
54293+ cap_audit = curracl->cap_invert_audit;
54294+
54295+ while ((curracl = curracl->parent_subject)) {
54296+ /* if the cap isn't specified in the current computed mask but is specified in the
54297+ current level subject, and is lowered in the current level subject, then add
54298+ it to the set of dropped capabilities
54299+ otherwise, add the current level subject's mask to the current computed mask
54300+ */
54301+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54302+ cap_raise(cap_mask, cap);
54303+ if (cap_raised(curracl->cap_lower, cap))
54304+ cap_raise(cap_drop, cap);
54305+ if (cap_raised(curracl->cap_invert_audit, cap))
54306+ cap_raise(cap_audit, cap);
54307+ }
54308+ }
54309+
54310+ if (!cap_raised(cap_drop, cap)) {
54311+ if (cap_raised(cap_audit, cap))
54312+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
54313+ return 1;
54314+ }
54315+
54316+ curracl = task->acl;
54317+
54318+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
54319+ && cap_raised(cred->cap_effective, cap)) {
54320+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54321+ task->role->roletype, cred->uid,
54322+ cred->gid, task->exec_file ?
54323+ gr_to_filename(task->exec_file->f_path.dentry,
54324+ task->exec_file->f_path.mnt) : curracl->filename,
54325+ curracl->filename, 0UL,
54326+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
54327+ return 1;
54328+ }
54329+
54330+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
54331+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
54332+
54333+ return 0;
54334+}
54335+
54336+int
54337+gr_acl_is_capable(const int cap)
54338+{
54339+ return gr_task_acl_is_capable(current, current_cred(), cap);
54340+}
54341+
54342+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
54343+{
54344+ struct acl_subject_label *curracl;
54345+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54346+
54347+ if (!gr_acl_is_enabled())
54348+ return 1;
54349+
54350+ curracl = task->acl;
54351+
54352+ cap_drop = curracl->cap_lower;
54353+ cap_mask = curracl->cap_mask;
54354+
54355+ while ((curracl = curracl->parent_subject)) {
54356+ /* if the cap isn't specified in the current computed mask but is specified in the
54357+ current level subject, and is lowered in the current level subject, then add
54358+ it to the set of dropped capabilities
54359+ otherwise, add the current level subject's mask to the current computed mask
54360+ */
54361+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54362+ cap_raise(cap_mask, cap);
54363+ if (cap_raised(curracl->cap_lower, cap))
54364+ cap_raise(cap_drop, cap);
54365+ }
54366+ }
54367+
54368+ if (!cap_raised(cap_drop, cap))
54369+ return 1;
54370+
54371+ return 0;
54372+}
54373+
54374+int
54375+gr_acl_is_capable_nolog(const int cap)
54376+{
54377+ return gr_task_acl_is_capable_nolog(current, cap);
54378+}
54379+
54380diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
54381new file mode 100644
54382index 0000000..88d0e87
54383--- /dev/null
54384+++ b/grsecurity/gracl_fs.c
54385@@ -0,0 +1,435 @@
54386+#include <linux/kernel.h>
54387+#include <linux/sched.h>
54388+#include <linux/types.h>
54389+#include <linux/fs.h>
54390+#include <linux/file.h>
54391+#include <linux/stat.h>
54392+#include <linux/grsecurity.h>
54393+#include <linux/grinternal.h>
54394+#include <linux/gracl.h>
54395+
54396+umode_t
54397+gr_acl_umask(void)
54398+{
54399+ if (unlikely(!gr_acl_is_enabled()))
54400+ return 0;
54401+
54402+ return current->role->umask;
54403+}
54404+
54405+__u32
54406+gr_acl_handle_hidden_file(const struct dentry * dentry,
54407+ const struct vfsmount * mnt)
54408+{
54409+ __u32 mode;
54410+
54411+ if (unlikely(!dentry->d_inode))
54412+ return GR_FIND;
54413+
54414+ mode =
54415+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
54416+
54417+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
54418+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54419+ return mode;
54420+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
54421+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54422+ return 0;
54423+ } else if (unlikely(!(mode & GR_FIND)))
54424+ return 0;
54425+
54426+ return GR_FIND;
54427+}
54428+
54429+__u32
54430+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54431+ int acc_mode)
54432+{
54433+ __u32 reqmode = GR_FIND;
54434+ __u32 mode;
54435+
54436+ if (unlikely(!dentry->d_inode))
54437+ return reqmode;
54438+
54439+ if (acc_mode & MAY_APPEND)
54440+ reqmode |= GR_APPEND;
54441+ else if (acc_mode & MAY_WRITE)
54442+ reqmode |= GR_WRITE;
54443+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
54444+ reqmode |= GR_READ;
54445+
54446+ mode =
54447+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54448+ mnt);
54449+
54450+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54451+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54452+ reqmode & GR_READ ? " reading" : "",
54453+ reqmode & GR_WRITE ? " writing" : reqmode &
54454+ GR_APPEND ? " appending" : "");
54455+ return reqmode;
54456+ } else
54457+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54458+ {
54459+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54460+ reqmode & GR_READ ? " reading" : "",
54461+ reqmode & GR_WRITE ? " writing" : reqmode &
54462+ GR_APPEND ? " appending" : "");
54463+ return 0;
54464+ } else if (unlikely((mode & reqmode) != reqmode))
54465+ return 0;
54466+
54467+ return reqmode;
54468+}
54469+
54470+__u32
54471+gr_acl_handle_creat(const struct dentry * dentry,
54472+ const struct dentry * p_dentry,
54473+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
54474+ const int imode)
54475+{
54476+ __u32 reqmode = GR_WRITE | GR_CREATE;
54477+ __u32 mode;
54478+
54479+ if (acc_mode & MAY_APPEND)
54480+ reqmode |= GR_APPEND;
54481+ // if a directory was required or the directory already exists, then
54482+ // don't count this open as a read
54483+ if ((acc_mode & MAY_READ) &&
54484+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
54485+ reqmode |= GR_READ;
54486+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
54487+ reqmode |= GR_SETID;
54488+
54489+ mode =
54490+ gr_check_create(dentry, p_dentry, p_mnt,
54491+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54492+
54493+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54494+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54495+ reqmode & GR_READ ? " reading" : "",
54496+ reqmode & GR_WRITE ? " writing" : reqmode &
54497+ GR_APPEND ? " appending" : "");
54498+ return reqmode;
54499+ } else
54500+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54501+ {
54502+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54503+ reqmode & GR_READ ? " reading" : "",
54504+ reqmode & GR_WRITE ? " writing" : reqmode &
54505+ GR_APPEND ? " appending" : "");
54506+ return 0;
54507+ } else if (unlikely((mode & reqmode) != reqmode))
54508+ return 0;
54509+
54510+ return reqmode;
54511+}
54512+
54513+__u32
54514+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
54515+ const int fmode)
54516+{
54517+ __u32 mode, reqmode = GR_FIND;
54518+
54519+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
54520+ reqmode |= GR_EXEC;
54521+ if (fmode & S_IWOTH)
54522+ reqmode |= GR_WRITE;
54523+ if (fmode & S_IROTH)
54524+ reqmode |= GR_READ;
54525+
54526+ mode =
54527+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54528+ mnt);
54529+
54530+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54531+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
54532+ reqmode & GR_READ ? " reading" : "",
54533+ reqmode & GR_WRITE ? " writing" : "",
54534+ reqmode & GR_EXEC ? " executing" : "");
54535+ return reqmode;
54536+ } else
54537+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54538+ {
54539+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
54540+ reqmode & GR_READ ? " reading" : "",
54541+ reqmode & GR_WRITE ? " writing" : "",
54542+ reqmode & GR_EXEC ? " executing" : "");
54543+ return 0;
54544+ } else if (unlikely((mode & reqmode) != reqmode))
54545+ return 0;
54546+
54547+ return reqmode;
54548+}
54549+
54550+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
54551+{
54552+ __u32 mode;
54553+
54554+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
54555+
54556+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
54557+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
54558+ return mode;
54559+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
54560+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
54561+ return 0;
54562+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
54563+ return 0;
54564+
54565+ return (reqmode);
54566+}
54567+
54568+__u32
54569+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54570+{
54571+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
54572+}
54573+
54574+__u32
54575+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
54576+{
54577+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
54578+}
54579+
54580+__u32
54581+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
54582+{
54583+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
54584+}
54585+
54586+__u32
54587+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
54588+{
54589+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
54590+}
54591+
54592+__u32
54593+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
54594+ umode_t *modeptr)
54595+{
54596+ umode_t mode;
54597+
54598+ *modeptr &= ~gr_acl_umask();
54599+ mode = *modeptr;
54600+
54601+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
54602+ return 1;
54603+
54604+ if (unlikely(mode & (S_ISUID | S_ISGID))) {
54605+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
54606+ GR_CHMOD_ACL_MSG);
54607+ } else {
54608+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
54609+ }
54610+}
54611+
54612+__u32
54613+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
54614+{
54615+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
54616+}
54617+
54618+__u32
54619+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
54620+{
54621+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
54622+}
54623+
54624+__u32
54625+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
54626+{
54627+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
54628+}
54629+
54630+__u32
54631+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
54632+{
54633+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
54634+ GR_UNIXCONNECT_ACL_MSG);
54635+}
54636+
54637+/* hardlinks require at minimum create and link permission,
54638+ any additional privilege required is based on the
54639+ privilege of the file being linked to
54640+*/
54641+__u32
54642+gr_acl_handle_link(const struct dentry * new_dentry,
54643+ const struct dentry * parent_dentry,
54644+ const struct vfsmount * parent_mnt,
54645+ const struct dentry * old_dentry,
54646+ const struct vfsmount * old_mnt, const char *to)
54647+{
54648+ __u32 mode;
54649+ __u32 needmode = GR_CREATE | GR_LINK;
54650+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
54651+
54652+ mode =
54653+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
54654+ old_mnt);
54655+
54656+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
54657+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
54658+ return mode;
54659+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
54660+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
54661+ return 0;
54662+ } else if (unlikely((mode & needmode) != needmode))
54663+ return 0;
54664+
54665+ return 1;
54666+}
54667+
54668+__u32
54669+gr_acl_handle_symlink(const struct dentry * new_dentry,
54670+ const struct dentry * parent_dentry,
54671+ const struct vfsmount * parent_mnt, const char *from)
54672+{
54673+ __u32 needmode = GR_WRITE | GR_CREATE;
54674+ __u32 mode;
54675+
54676+ mode =
54677+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
54678+ GR_CREATE | GR_AUDIT_CREATE |
54679+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
54680+
54681+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
54682+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
54683+ return mode;
54684+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
54685+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
54686+ return 0;
54687+ } else if (unlikely((mode & needmode) != needmode))
54688+ return 0;
54689+
54690+ return (GR_WRITE | GR_CREATE);
54691+}
54692+
54693+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
54694+{
54695+ __u32 mode;
54696+
54697+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54698+
54699+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
54700+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
54701+ return mode;
54702+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
54703+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
54704+ return 0;
54705+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
54706+ return 0;
54707+
54708+ return (reqmode);
54709+}
54710+
54711+__u32
54712+gr_acl_handle_mknod(const struct dentry * new_dentry,
54713+ const struct dentry * parent_dentry,
54714+ const struct vfsmount * parent_mnt,
54715+ const int mode)
54716+{
54717+ __u32 reqmode = GR_WRITE | GR_CREATE;
54718+ if (unlikely(mode & (S_ISUID | S_ISGID)))
54719+ reqmode |= GR_SETID;
54720+
54721+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
54722+ reqmode, GR_MKNOD_ACL_MSG);
54723+}
54724+
54725+__u32
54726+gr_acl_handle_mkdir(const struct dentry *new_dentry,
54727+ const struct dentry *parent_dentry,
54728+ const struct vfsmount *parent_mnt)
54729+{
54730+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
54731+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
54732+}
54733+
54734+#define RENAME_CHECK_SUCCESS(old, new) \
54735+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
54736+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
54737+
54738+int
54739+gr_acl_handle_rename(struct dentry *new_dentry,
54740+ struct dentry *parent_dentry,
54741+ const struct vfsmount *parent_mnt,
54742+ struct dentry *old_dentry,
54743+ struct inode *old_parent_inode,
54744+ struct vfsmount *old_mnt, const char *newname)
54745+{
54746+ __u32 comp1, comp2;
54747+ int error = 0;
54748+
54749+ if (unlikely(!gr_acl_is_enabled()))
54750+ return 0;
54751+
54752+ if (!new_dentry->d_inode) {
54753+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
54754+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
54755+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
54756+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
54757+ GR_DELETE | GR_AUDIT_DELETE |
54758+ GR_AUDIT_READ | GR_AUDIT_WRITE |
54759+ GR_SUPPRESS, old_mnt);
54760+ } else {
54761+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
54762+ GR_CREATE | GR_DELETE |
54763+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
54764+ GR_AUDIT_READ | GR_AUDIT_WRITE |
54765+ GR_SUPPRESS, parent_mnt);
54766+ comp2 =
54767+ gr_search_file(old_dentry,
54768+ GR_READ | GR_WRITE | GR_AUDIT_READ |
54769+ GR_DELETE | GR_AUDIT_DELETE |
54770+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
54771+ }
54772+
54773+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
54774+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
54775+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
54776+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
54777+ && !(comp2 & GR_SUPPRESS)) {
54778+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
54779+ error = -EACCES;
54780+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
54781+ error = -EACCES;
54782+
54783+ return error;
54784+}
54785+
54786+void
54787+gr_acl_handle_exit(void)
54788+{
54789+ u16 id;
54790+ char *rolename;
54791+ struct file *exec_file;
54792+
54793+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
54794+ !(current->role->roletype & GR_ROLE_PERSIST))) {
54795+ id = current->acl_role_id;
54796+ rolename = current->role->rolename;
54797+ gr_set_acls(1);
54798+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
54799+ }
54800+
54801+ write_lock(&grsec_exec_file_lock);
54802+ exec_file = current->exec_file;
54803+ current->exec_file = NULL;
54804+ write_unlock(&grsec_exec_file_lock);
54805+
54806+ if (exec_file)
54807+ fput(exec_file);
54808+}
54809+
54810+int
54811+gr_acl_handle_procpidmem(const struct task_struct *task)
54812+{
54813+ if (unlikely(!gr_acl_is_enabled()))
54814+ return 0;
54815+
54816+ if (task != current && task->acl->mode & GR_PROTPROCFD)
54817+ return -EACCES;
54818+
54819+ return 0;
54820+}
54821diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
54822new file mode 100644
54823index 0000000..58800a7
54824--- /dev/null
54825+++ b/grsecurity/gracl_ip.c
54826@@ -0,0 +1,384 @@
54827+#include <linux/kernel.h>
54828+#include <asm/uaccess.h>
54829+#include <asm/errno.h>
54830+#include <net/sock.h>
54831+#include <linux/file.h>
54832+#include <linux/fs.h>
54833+#include <linux/net.h>
54834+#include <linux/in.h>
54835+#include <linux/skbuff.h>
54836+#include <linux/ip.h>
54837+#include <linux/udp.h>
54838+#include <linux/types.h>
54839+#include <linux/sched.h>
54840+#include <linux/netdevice.h>
54841+#include <linux/inetdevice.h>
54842+#include <linux/gracl.h>
54843+#include <linux/grsecurity.h>
54844+#include <linux/grinternal.h>
54845+
54846+#define GR_BIND 0x01
54847+#define GR_CONNECT 0x02
54848+#define GR_INVERT 0x04
54849+#define GR_BINDOVERRIDE 0x08
54850+#define GR_CONNECTOVERRIDE 0x10
54851+#define GR_SOCK_FAMILY 0x20
54852+
54853+static const char * gr_protocols[IPPROTO_MAX] = {
54854+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
54855+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
54856+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
54857+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
54858+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
54859+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
54860+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
54861+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
54862+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
54863+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
54864+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
54865+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
54866+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
54867+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
54868+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
54869+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
54870+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
54871+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
54872+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
54873+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
54874+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
54875+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
54876+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
54877+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
54878+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
54879+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
54880+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
54881+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
54882+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
54883+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
54884+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
54885+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
54886+ };
54887+
54888+static const char * gr_socktypes[SOCK_MAX] = {
54889+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
54890+ "unknown:7", "unknown:8", "unknown:9", "packet"
54891+ };
54892+
54893+static const char * gr_sockfamilies[AF_MAX+1] = {
54894+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
54895+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
54896+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
54897+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
54898+ };
54899+
54900+const char *
54901+gr_proto_to_name(unsigned char proto)
54902+{
54903+ return gr_protocols[proto];
54904+}
54905+
54906+const char *
54907+gr_socktype_to_name(unsigned char type)
54908+{
54909+ return gr_socktypes[type];
54910+}
54911+
54912+const char *
54913+gr_sockfamily_to_name(unsigned char family)
54914+{
54915+ return gr_sockfamilies[family];
54916+}
54917+
54918+int
54919+gr_search_socket(const int domain, const int type, const int protocol)
54920+{
54921+ struct acl_subject_label *curr;
54922+ const struct cred *cred = current_cred();
54923+
54924+ if (unlikely(!gr_acl_is_enabled()))
54925+ goto exit;
54926+
54927+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
54928+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
54929+ goto exit; // let the kernel handle it
54930+
54931+ curr = current->acl;
54932+
54933+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
54934+ /* the family is allowed, if this is PF_INET allow it only if
54935+ the extra sock type/protocol checks pass */
54936+ if (domain == PF_INET)
54937+ goto inet_check;
54938+ goto exit;
54939+ } else {
54940+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54941+ __u32 fakeip = 0;
54942+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54943+ current->role->roletype, cred->uid,
54944+ cred->gid, current->exec_file ?
54945+ gr_to_filename(current->exec_file->f_path.dentry,
54946+ current->exec_file->f_path.mnt) :
54947+ curr->filename, curr->filename,
54948+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
54949+ &current->signal->saved_ip);
54950+ goto exit;
54951+ }
54952+ goto exit_fail;
54953+ }
54954+
54955+inet_check:
54956+ /* the rest of this checking is for IPv4 only */
54957+ if (!curr->ips)
54958+ goto exit;
54959+
54960+ if ((curr->ip_type & (1 << type)) &&
54961+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
54962+ goto exit;
54963+
54964+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54965+ /* we don't place acls on raw sockets , and sometimes
54966+ dgram/ip sockets are opened for ioctl and not
54967+ bind/connect, so we'll fake a bind learn log */
54968+ if (type == SOCK_RAW || type == SOCK_PACKET) {
54969+ __u32 fakeip = 0;
54970+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54971+ current->role->roletype, cred->uid,
54972+ cred->gid, current->exec_file ?
54973+ gr_to_filename(current->exec_file->f_path.dentry,
54974+ current->exec_file->f_path.mnt) :
54975+ curr->filename, curr->filename,
54976+ &fakeip, 0, type,
54977+ protocol, GR_CONNECT, &current->signal->saved_ip);
54978+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
54979+ __u32 fakeip = 0;
54980+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54981+ current->role->roletype, cred->uid,
54982+ cred->gid, current->exec_file ?
54983+ gr_to_filename(current->exec_file->f_path.dentry,
54984+ current->exec_file->f_path.mnt) :
54985+ curr->filename, curr->filename,
54986+ &fakeip, 0, type,
54987+ protocol, GR_BIND, &current->signal->saved_ip);
54988+ }
54989+ /* we'll log when they use connect or bind */
54990+ goto exit;
54991+ }
54992+
54993+exit_fail:
54994+ if (domain == PF_INET)
54995+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
54996+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
54997+ else
54998+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
54999+ gr_socktype_to_name(type), protocol);
55000+
55001+ return 0;
55002+exit:
55003+ return 1;
55004+}
55005+
55006+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
55007+{
55008+ if ((ip->mode & mode) &&
55009+ (ip_port >= ip->low) &&
55010+ (ip_port <= ip->high) &&
55011+ ((ntohl(ip_addr) & our_netmask) ==
55012+ (ntohl(our_addr) & our_netmask))
55013+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
55014+ && (ip->type & (1 << type))) {
55015+ if (ip->mode & GR_INVERT)
55016+ return 2; // specifically denied
55017+ else
55018+ return 1; // allowed
55019+ }
55020+
55021+ return 0; // not specifically allowed, may continue parsing
55022+}
55023+
55024+static int
55025+gr_search_connectbind(const int full_mode, struct sock *sk,
55026+ struct sockaddr_in *addr, const int type)
55027+{
55028+ char iface[IFNAMSIZ] = {0};
55029+ struct acl_subject_label *curr;
55030+ struct acl_ip_label *ip;
55031+ struct inet_sock *isk;
55032+ struct net_device *dev;
55033+ struct in_device *idev;
55034+ unsigned long i;
55035+ int ret;
55036+ int mode = full_mode & (GR_BIND | GR_CONNECT);
55037+ __u32 ip_addr = 0;
55038+ __u32 our_addr;
55039+ __u32 our_netmask;
55040+ char *p;
55041+ __u16 ip_port = 0;
55042+ const struct cred *cred = current_cred();
55043+
55044+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
55045+ return 0;
55046+
55047+ curr = current->acl;
55048+ isk = inet_sk(sk);
55049+
55050+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
55051+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
55052+ addr->sin_addr.s_addr = curr->inaddr_any_override;
55053+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
55054+ struct sockaddr_in saddr;
55055+ int err;
55056+
55057+ saddr.sin_family = AF_INET;
55058+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
55059+ saddr.sin_port = isk->inet_sport;
55060+
55061+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55062+ if (err)
55063+ return err;
55064+
55065+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55066+ if (err)
55067+ return err;
55068+ }
55069+
55070+ if (!curr->ips)
55071+ return 0;
55072+
55073+ ip_addr = addr->sin_addr.s_addr;
55074+ ip_port = ntohs(addr->sin_port);
55075+
55076+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55077+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55078+ current->role->roletype, cred->uid,
55079+ cred->gid, current->exec_file ?
55080+ gr_to_filename(current->exec_file->f_path.dentry,
55081+ current->exec_file->f_path.mnt) :
55082+ curr->filename, curr->filename,
55083+ &ip_addr, ip_port, type,
55084+ sk->sk_protocol, mode, &current->signal->saved_ip);
55085+ return 0;
55086+ }
55087+
55088+ for (i = 0; i < curr->ip_num; i++) {
55089+ ip = *(curr->ips + i);
55090+ if (ip->iface != NULL) {
55091+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
55092+ p = strchr(iface, ':');
55093+ if (p != NULL)
55094+ *p = '\0';
55095+ dev = dev_get_by_name(sock_net(sk), iface);
55096+ if (dev == NULL)
55097+ continue;
55098+ idev = in_dev_get(dev);
55099+ if (idev == NULL) {
55100+ dev_put(dev);
55101+ continue;
55102+ }
55103+ rcu_read_lock();
55104+ for_ifa(idev) {
55105+ if (!strcmp(ip->iface, ifa->ifa_label)) {
55106+ our_addr = ifa->ifa_address;
55107+ our_netmask = 0xffffffff;
55108+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55109+ if (ret == 1) {
55110+ rcu_read_unlock();
55111+ in_dev_put(idev);
55112+ dev_put(dev);
55113+ return 0;
55114+ } else if (ret == 2) {
55115+ rcu_read_unlock();
55116+ in_dev_put(idev);
55117+ dev_put(dev);
55118+ goto denied;
55119+ }
55120+ }
55121+ } endfor_ifa(idev);
55122+ rcu_read_unlock();
55123+ in_dev_put(idev);
55124+ dev_put(dev);
55125+ } else {
55126+ our_addr = ip->addr;
55127+ our_netmask = ip->netmask;
55128+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55129+ if (ret == 1)
55130+ return 0;
55131+ else if (ret == 2)
55132+ goto denied;
55133+ }
55134+ }
55135+
55136+denied:
55137+ if (mode == GR_BIND)
55138+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55139+ else if (mode == GR_CONNECT)
55140+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55141+
55142+ return -EACCES;
55143+}
55144+
55145+int
55146+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
55147+{
55148+ /* always allow disconnection of dgram sockets with connect */
55149+ if (addr->sin_family == AF_UNSPEC)
55150+ return 0;
55151+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
55152+}
55153+
55154+int
55155+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
55156+{
55157+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
55158+}
55159+
55160+int gr_search_listen(struct socket *sock)
55161+{
55162+ struct sock *sk = sock->sk;
55163+ struct sockaddr_in addr;
55164+
55165+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55166+ addr.sin_port = inet_sk(sk)->inet_sport;
55167+
55168+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55169+}
55170+
55171+int gr_search_accept(struct socket *sock)
55172+{
55173+ struct sock *sk = sock->sk;
55174+ struct sockaddr_in addr;
55175+
55176+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55177+ addr.sin_port = inet_sk(sk)->inet_sport;
55178+
55179+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55180+}
55181+
55182+int
55183+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
55184+{
55185+ if (addr)
55186+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
55187+ else {
55188+ struct sockaddr_in sin;
55189+ const struct inet_sock *inet = inet_sk(sk);
55190+
55191+ sin.sin_addr.s_addr = inet->inet_daddr;
55192+ sin.sin_port = inet->inet_dport;
55193+
55194+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55195+ }
55196+}
55197+
55198+int
55199+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
55200+{
55201+ struct sockaddr_in sin;
55202+
55203+ if (unlikely(skb->len < sizeof (struct udphdr)))
55204+ return 0; // skip this packet
55205+
55206+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
55207+ sin.sin_port = udp_hdr(skb)->source;
55208+
55209+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55210+}
55211diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
55212new file mode 100644
55213index 0000000..25f54ef
55214--- /dev/null
55215+++ b/grsecurity/gracl_learn.c
55216@@ -0,0 +1,207 @@
55217+#include <linux/kernel.h>
55218+#include <linux/mm.h>
55219+#include <linux/sched.h>
55220+#include <linux/poll.h>
55221+#include <linux/string.h>
55222+#include <linux/file.h>
55223+#include <linux/types.h>
55224+#include <linux/vmalloc.h>
55225+#include <linux/grinternal.h>
55226+
55227+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
55228+ size_t count, loff_t *ppos);
55229+extern int gr_acl_is_enabled(void);
55230+
55231+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
55232+static int gr_learn_attached;
55233+
55234+/* use a 512k buffer */
55235+#define LEARN_BUFFER_SIZE (512 * 1024)
55236+
55237+static DEFINE_SPINLOCK(gr_learn_lock);
55238+static DEFINE_MUTEX(gr_learn_user_mutex);
55239+
55240+/* we need to maintain two buffers, so that the kernel context of grlearn
55241+ uses a semaphore around the userspace copying, and the other kernel contexts
55242+ use a spinlock when copying into the buffer, since they cannot sleep
55243+*/
55244+static char *learn_buffer;
55245+static char *learn_buffer_user;
55246+static int learn_buffer_len;
55247+static int learn_buffer_user_len;
55248+
55249+static ssize_t
55250+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
55251+{
55252+ DECLARE_WAITQUEUE(wait, current);
55253+ ssize_t retval = 0;
55254+
55255+ add_wait_queue(&learn_wait, &wait);
55256+ set_current_state(TASK_INTERRUPTIBLE);
55257+ do {
55258+ mutex_lock(&gr_learn_user_mutex);
55259+ spin_lock(&gr_learn_lock);
55260+ if (learn_buffer_len)
55261+ break;
55262+ spin_unlock(&gr_learn_lock);
55263+ mutex_unlock(&gr_learn_user_mutex);
55264+ if (file->f_flags & O_NONBLOCK) {
55265+ retval = -EAGAIN;
55266+ goto out;
55267+ }
55268+ if (signal_pending(current)) {
55269+ retval = -ERESTARTSYS;
55270+ goto out;
55271+ }
55272+
55273+ schedule();
55274+ } while (1);
55275+
55276+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
55277+ learn_buffer_user_len = learn_buffer_len;
55278+ retval = learn_buffer_len;
55279+ learn_buffer_len = 0;
55280+
55281+ spin_unlock(&gr_learn_lock);
55282+
55283+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
55284+ retval = -EFAULT;
55285+
55286+ mutex_unlock(&gr_learn_user_mutex);
55287+out:
55288+ set_current_state(TASK_RUNNING);
55289+ remove_wait_queue(&learn_wait, &wait);
55290+ return retval;
55291+}
55292+
55293+static unsigned int
55294+poll_learn(struct file * file, poll_table * wait)
55295+{
55296+ poll_wait(file, &learn_wait, wait);
55297+
55298+ if (learn_buffer_len)
55299+ return (POLLIN | POLLRDNORM);
55300+
55301+ return 0;
55302+}
55303+
55304+void
55305+gr_clear_learn_entries(void)
55306+{
55307+ char *tmp;
55308+
55309+ mutex_lock(&gr_learn_user_mutex);
55310+ spin_lock(&gr_learn_lock);
55311+ tmp = learn_buffer;
55312+ learn_buffer = NULL;
55313+ spin_unlock(&gr_learn_lock);
55314+ if (tmp)
55315+ vfree(tmp);
55316+ if (learn_buffer_user != NULL) {
55317+ vfree(learn_buffer_user);
55318+ learn_buffer_user = NULL;
55319+ }
55320+ learn_buffer_len = 0;
55321+ mutex_unlock(&gr_learn_user_mutex);
55322+
55323+ return;
55324+}
55325+
55326+void
55327+gr_add_learn_entry(const char *fmt, ...)
55328+{
55329+ va_list args;
55330+ unsigned int len;
55331+
55332+ if (!gr_learn_attached)
55333+ return;
55334+
55335+ spin_lock(&gr_learn_lock);
55336+
55337+ /* leave a gap at the end so we know when it's "full" but don't have to
55338+ compute the exact length of the string we're trying to append
55339+ */
55340+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
55341+ spin_unlock(&gr_learn_lock);
55342+ wake_up_interruptible(&learn_wait);
55343+ return;
55344+ }
55345+ if (learn_buffer == NULL) {
55346+ spin_unlock(&gr_learn_lock);
55347+ return;
55348+ }
55349+
55350+ va_start(args, fmt);
55351+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
55352+ va_end(args);
55353+
55354+ learn_buffer_len += len + 1;
55355+
55356+ spin_unlock(&gr_learn_lock);
55357+ wake_up_interruptible(&learn_wait);
55358+
55359+ return;
55360+}
55361+
55362+static int
55363+open_learn(struct inode *inode, struct file *file)
55364+{
55365+ if (file->f_mode & FMODE_READ && gr_learn_attached)
55366+ return -EBUSY;
55367+ if (file->f_mode & FMODE_READ) {
55368+ int retval = 0;
55369+ mutex_lock(&gr_learn_user_mutex);
55370+ if (learn_buffer == NULL)
55371+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
55372+ if (learn_buffer_user == NULL)
55373+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
55374+ if (learn_buffer == NULL) {
55375+ retval = -ENOMEM;
55376+ goto out_error;
55377+ }
55378+ if (learn_buffer_user == NULL) {
55379+ retval = -ENOMEM;
55380+ goto out_error;
55381+ }
55382+ learn_buffer_len = 0;
55383+ learn_buffer_user_len = 0;
55384+ gr_learn_attached = 1;
55385+out_error:
55386+ mutex_unlock(&gr_learn_user_mutex);
55387+ return retval;
55388+ }
55389+ return 0;
55390+}
55391+
55392+static int
55393+close_learn(struct inode *inode, struct file *file)
55394+{
55395+ if (file->f_mode & FMODE_READ) {
55396+ char *tmp = NULL;
55397+ mutex_lock(&gr_learn_user_mutex);
55398+ spin_lock(&gr_learn_lock);
55399+ tmp = learn_buffer;
55400+ learn_buffer = NULL;
55401+ spin_unlock(&gr_learn_lock);
55402+ if (tmp)
55403+ vfree(tmp);
55404+ if (learn_buffer_user != NULL) {
55405+ vfree(learn_buffer_user);
55406+ learn_buffer_user = NULL;
55407+ }
55408+ learn_buffer_len = 0;
55409+ learn_buffer_user_len = 0;
55410+ gr_learn_attached = 0;
55411+ mutex_unlock(&gr_learn_user_mutex);
55412+ }
55413+
55414+ return 0;
55415+}
55416+
55417+const struct file_operations grsec_fops = {
55418+ .read = read_learn,
55419+ .write = write_grsec_handler,
55420+ .open = open_learn,
55421+ .release = close_learn,
55422+ .poll = poll_learn,
55423+};
55424diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
55425new file mode 100644
55426index 0000000..39645c9
55427--- /dev/null
55428+++ b/grsecurity/gracl_res.c
55429@@ -0,0 +1,68 @@
55430+#include <linux/kernel.h>
55431+#include <linux/sched.h>
55432+#include <linux/gracl.h>
55433+#include <linux/grinternal.h>
55434+
55435+static const char *restab_log[] = {
55436+ [RLIMIT_CPU] = "RLIMIT_CPU",
55437+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
55438+ [RLIMIT_DATA] = "RLIMIT_DATA",
55439+ [RLIMIT_STACK] = "RLIMIT_STACK",
55440+ [RLIMIT_CORE] = "RLIMIT_CORE",
55441+ [RLIMIT_RSS] = "RLIMIT_RSS",
55442+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
55443+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
55444+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
55445+ [RLIMIT_AS] = "RLIMIT_AS",
55446+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
55447+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
55448+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
55449+ [RLIMIT_NICE] = "RLIMIT_NICE",
55450+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
55451+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
55452+ [GR_CRASH_RES] = "RLIMIT_CRASH"
55453+};
55454+
55455+void
55456+gr_log_resource(const struct task_struct *task,
55457+ const int res, const unsigned long wanted, const int gt)
55458+{
55459+ const struct cred *cred;
55460+ unsigned long rlim;
55461+
55462+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
55463+ return;
55464+
55465+ // not yet supported resource
55466+ if (unlikely(!restab_log[res]))
55467+ return;
55468+
55469+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
55470+ rlim = task_rlimit_max(task, res);
55471+ else
55472+ rlim = task_rlimit(task, res);
55473+
55474+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
55475+ return;
55476+
55477+ rcu_read_lock();
55478+ cred = __task_cred(task);
55479+
55480+ if (res == RLIMIT_NPROC &&
55481+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
55482+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
55483+ goto out_rcu_unlock;
55484+ else if (res == RLIMIT_MEMLOCK &&
55485+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
55486+ goto out_rcu_unlock;
55487+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
55488+ goto out_rcu_unlock;
55489+ rcu_read_unlock();
55490+
55491+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
55492+
55493+ return;
55494+out_rcu_unlock:
55495+ rcu_read_unlock();
55496+ return;
55497+}
55498diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
55499new file mode 100644
55500index 0000000..5556be3
55501--- /dev/null
55502+++ b/grsecurity/gracl_segv.c
55503@@ -0,0 +1,299 @@
55504+#include <linux/kernel.h>
55505+#include <linux/mm.h>
55506+#include <asm/uaccess.h>
55507+#include <asm/errno.h>
55508+#include <asm/mman.h>
55509+#include <net/sock.h>
55510+#include <linux/file.h>
55511+#include <linux/fs.h>
55512+#include <linux/net.h>
55513+#include <linux/in.h>
55514+#include <linux/slab.h>
55515+#include <linux/types.h>
55516+#include <linux/sched.h>
55517+#include <linux/timer.h>
55518+#include <linux/gracl.h>
55519+#include <linux/grsecurity.h>
55520+#include <linux/grinternal.h>
55521+
55522+static struct crash_uid *uid_set;
55523+static unsigned short uid_used;
55524+static DEFINE_SPINLOCK(gr_uid_lock);
55525+extern rwlock_t gr_inode_lock;
55526+extern struct acl_subject_label *
55527+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
55528+ struct acl_role_label *role);
55529+
55530+#ifdef CONFIG_BTRFS_FS
55531+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
55532+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
55533+#endif
55534+
55535+static inline dev_t __get_dev(const struct dentry *dentry)
55536+{
55537+#ifdef CONFIG_BTRFS_FS
55538+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
55539+ return get_btrfs_dev_from_inode(dentry->d_inode);
55540+ else
55541+#endif
55542+ return dentry->d_inode->i_sb->s_dev;
55543+}
55544+
55545+int
55546+gr_init_uidset(void)
55547+{
55548+ uid_set =
55549+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
55550+ uid_used = 0;
55551+
55552+ return uid_set ? 1 : 0;
55553+}
55554+
55555+void
55556+gr_free_uidset(void)
55557+{
55558+ if (uid_set)
55559+ kfree(uid_set);
55560+
55561+ return;
55562+}
55563+
55564+int
55565+gr_find_uid(const uid_t uid)
55566+{
55567+ struct crash_uid *tmp = uid_set;
55568+ uid_t buid;
55569+ int low = 0, high = uid_used - 1, mid;
55570+
55571+ while (high >= low) {
55572+ mid = (low + high) >> 1;
55573+ buid = tmp[mid].uid;
55574+ if (buid == uid)
55575+ return mid;
55576+ if (buid > uid)
55577+ high = mid - 1;
55578+ if (buid < uid)
55579+ low = mid + 1;
55580+ }
55581+
55582+ return -1;
55583+}
55584+
55585+static __inline__ void
55586+gr_insertsort(void)
55587+{
55588+ unsigned short i, j;
55589+ struct crash_uid index;
55590+
55591+ for (i = 1; i < uid_used; i++) {
55592+ index = uid_set[i];
55593+ j = i;
55594+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
55595+ uid_set[j] = uid_set[j - 1];
55596+ j--;
55597+ }
55598+ uid_set[j] = index;
55599+ }
55600+
55601+ return;
55602+}
55603+
55604+static __inline__ void
55605+gr_insert_uid(const uid_t uid, const unsigned long expires)
55606+{
55607+ int loc;
55608+
55609+ if (uid_used == GR_UIDTABLE_MAX)
55610+ return;
55611+
55612+ loc = gr_find_uid(uid);
55613+
55614+ if (loc >= 0) {
55615+ uid_set[loc].expires = expires;
55616+ return;
55617+ }
55618+
55619+ uid_set[uid_used].uid = uid;
55620+ uid_set[uid_used].expires = expires;
55621+ uid_used++;
55622+
55623+ gr_insertsort();
55624+
55625+ return;
55626+}
55627+
55628+void
55629+gr_remove_uid(const unsigned short loc)
55630+{
55631+ unsigned short i;
55632+
55633+ for (i = loc + 1; i < uid_used; i++)
55634+ uid_set[i - 1] = uid_set[i];
55635+
55636+ uid_used--;
55637+
55638+ return;
55639+}
55640+
55641+int
55642+gr_check_crash_uid(const uid_t uid)
55643+{
55644+ int loc;
55645+ int ret = 0;
55646+
55647+ if (unlikely(!gr_acl_is_enabled()))
55648+ return 0;
55649+
55650+ spin_lock(&gr_uid_lock);
55651+ loc = gr_find_uid(uid);
55652+
55653+ if (loc < 0)
55654+ goto out_unlock;
55655+
55656+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
55657+ gr_remove_uid(loc);
55658+ else
55659+ ret = 1;
55660+
55661+out_unlock:
55662+ spin_unlock(&gr_uid_lock);
55663+ return ret;
55664+}
55665+
55666+static __inline__ int
55667+proc_is_setxid(const struct cred *cred)
55668+{
55669+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
55670+ cred->uid != cred->fsuid)
55671+ return 1;
55672+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
55673+ cred->gid != cred->fsgid)
55674+ return 1;
55675+
55676+ return 0;
55677+}
55678+
55679+extern int gr_fake_force_sig(int sig, struct task_struct *t);
55680+
55681+void
55682+gr_handle_crash(struct task_struct *task, const int sig)
55683+{
55684+ struct acl_subject_label *curr;
55685+ struct task_struct *tsk, *tsk2;
55686+ const struct cred *cred;
55687+ const struct cred *cred2;
55688+
55689+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
55690+ return;
55691+
55692+ if (unlikely(!gr_acl_is_enabled()))
55693+ return;
55694+
55695+ curr = task->acl;
55696+
55697+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
55698+ return;
55699+
55700+ if (time_before_eq(curr->expires, get_seconds())) {
55701+ curr->expires = 0;
55702+ curr->crashes = 0;
55703+ }
55704+
55705+ curr->crashes++;
55706+
55707+ if (!curr->expires)
55708+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
55709+
55710+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
55711+ time_after(curr->expires, get_seconds())) {
55712+ rcu_read_lock();
55713+ cred = __task_cred(task);
55714+ if (cred->uid && proc_is_setxid(cred)) {
55715+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
55716+ spin_lock(&gr_uid_lock);
55717+ gr_insert_uid(cred->uid, curr->expires);
55718+ spin_unlock(&gr_uid_lock);
55719+ curr->expires = 0;
55720+ curr->crashes = 0;
55721+ read_lock(&tasklist_lock);
55722+ do_each_thread(tsk2, tsk) {
55723+ cred2 = __task_cred(tsk);
55724+ if (tsk != task && cred2->uid == cred->uid)
55725+ gr_fake_force_sig(SIGKILL, tsk);
55726+ } while_each_thread(tsk2, tsk);
55727+ read_unlock(&tasklist_lock);
55728+ } else {
55729+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
55730+ read_lock(&tasklist_lock);
55731+ read_lock(&grsec_exec_file_lock);
55732+ do_each_thread(tsk2, tsk) {
55733+ if (likely(tsk != task)) {
55734+ // if this thread has the same subject as the one that triggered
55735+ // RES_CRASH and it's the same binary, kill it
55736+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
55737+ gr_fake_force_sig(SIGKILL, tsk);
55738+ }
55739+ } while_each_thread(tsk2, tsk);
55740+ read_unlock(&grsec_exec_file_lock);
55741+ read_unlock(&tasklist_lock);
55742+ }
55743+ rcu_read_unlock();
55744+ }
55745+
55746+ return;
55747+}
55748+
55749+int
55750+gr_check_crash_exec(const struct file *filp)
55751+{
55752+ struct acl_subject_label *curr;
55753+
55754+ if (unlikely(!gr_acl_is_enabled()))
55755+ return 0;
55756+
55757+ read_lock(&gr_inode_lock);
55758+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
55759+ __get_dev(filp->f_path.dentry),
55760+ current->role);
55761+ read_unlock(&gr_inode_lock);
55762+
55763+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
55764+ (!curr->crashes && !curr->expires))
55765+ return 0;
55766+
55767+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
55768+ time_after(curr->expires, get_seconds()))
55769+ return 1;
55770+ else if (time_before_eq(curr->expires, get_seconds())) {
55771+ curr->crashes = 0;
55772+ curr->expires = 0;
55773+ }
55774+
55775+ return 0;
55776+}
55777+
55778+void
55779+gr_handle_alertkill(struct task_struct *task)
55780+{
55781+ struct acl_subject_label *curracl;
55782+ __u32 curr_ip;
55783+ struct task_struct *p, *p2;
55784+
55785+ if (unlikely(!gr_acl_is_enabled()))
55786+ return;
55787+
55788+ curracl = task->acl;
55789+ curr_ip = task->signal->curr_ip;
55790+
55791+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
55792+ read_lock(&tasklist_lock);
55793+ do_each_thread(p2, p) {
55794+ if (p->signal->curr_ip == curr_ip)
55795+ gr_fake_force_sig(SIGKILL, p);
55796+ } while_each_thread(p2, p);
55797+ read_unlock(&tasklist_lock);
55798+ } else if (curracl->mode & GR_KILLPROC)
55799+ gr_fake_force_sig(SIGKILL, task);
55800+
55801+ return;
55802+}
55803diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
55804new file mode 100644
55805index 0000000..9d83a69
55806--- /dev/null
55807+++ b/grsecurity/gracl_shm.c
55808@@ -0,0 +1,40 @@
55809+#include <linux/kernel.h>
55810+#include <linux/mm.h>
55811+#include <linux/sched.h>
55812+#include <linux/file.h>
55813+#include <linux/ipc.h>
55814+#include <linux/gracl.h>
55815+#include <linux/grsecurity.h>
55816+#include <linux/grinternal.h>
55817+
55818+int
55819+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55820+ const time_t shm_createtime, const uid_t cuid, const int shmid)
55821+{
55822+ struct task_struct *task;
55823+
55824+ if (!gr_acl_is_enabled())
55825+ return 1;
55826+
55827+ rcu_read_lock();
55828+ read_lock(&tasklist_lock);
55829+
55830+ task = find_task_by_vpid(shm_cprid);
55831+
55832+ if (unlikely(!task))
55833+ task = find_task_by_vpid(shm_lapid);
55834+
55835+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
55836+ (task->pid == shm_lapid)) &&
55837+ (task->acl->mode & GR_PROTSHM) &&
55838+ (task->acl != current->acl))) {
55839+ read_unlock(&tasklist_lock);
55840+ rcu_read_unlock();
55841+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
55842+ return 0;
55843+ }
55844+ read_unlock(&tasklist_lock);
55845+ rcu_read_unlock();
55846+
55847+ return 1;
55848+}
55849diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
55850new file mode 100644
55851index 0000000..bc0be01
55852--- /dev/null
55853+++ b/grsecurity/grsec_chdir.c
55854@@ -0,0 +1,19 @@
55855+#include <linux/kernel.h>
55856+#include <linux/sched.h>
55857+#include <linux/fs.h>
55858+#include <linux/file.h>
55859+#include <linux/grsecurity.h>
55860+#include <linux/grinternal.h>
55861+
55862+void
55863+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
55864+{
55865+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55866+ if ((grsec_enable_chdir && grsec_enable_group &&
55867+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
55868+ !grsec_enable_group)) {
55869+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
55870+ }
55871+#endif
55872+ return;
55873+}
55874diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
55875new file mode 100644
55876index 0000000..9807ee2
55877--- /dev/null
55878+++ b/grsecurity/grsec_chroot.c
55879@@ -0,0 +1,368 @@
55880+#include <linux/kernel.h>
55881+#include <linux/module.h>
55882+#include <linux/sched.h>
55883+#include <linux/file.h>
55884+#include <linux/fs.h>
55885+#include <linux/mount.h>
55886+#include <linux/types.h>
55887+#include "../fs/mount.h"
55888+#include <linux/grsecurity.h>
55889+#include <linux/grinternal.h>
55890+
55891+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
55892+{
55893+#ifdef CONFIG_GRKERNSEC
55894+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
55895+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
55896+ task->gr_is_chrooted = 1;
55897+ else
55898+ task->gr_is_chrooted = 0;
55899+
55900+ task->gr_chroot_dentry = path->dentry;
55901+#endif
55902+ return;
55903+}
55904+
55905+void gr_clear_chroot_entries(struct task_struct *task)
55906+{
55907+#ifdef CONFIG_GRKERNSEC
55908+ task->gr_is_chrooted = 0;
55909+ task->gr_chroot_dentry = NULL;
55910+#endif
55911+ return;
55912+}
55913+
55914+int
55915+gr_handle_chroot_unix(const pid_t pid)
55916+{
55917+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55918+ struct task_struct *p;
55919+
55920+ if (unlikely(!grsec_enable_chroot_unix))
55921+ return 1;
55922+
55923+ if (likely(!proc_is_chrooted(current)))
55924+ return 1;
55925+
55926+ rcu_read_lock();
55927+ read_lock(&tasklist_lock);
55928+ p = find_task_by_vpid_unrestricted(pid);
55929+ if (unlikely(p && !have_same_root(current, p))) {
55930+ read_unlock(&tasklist_lock);
55931+ rcu_read_unlock();
55932+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
55933+ return 0;
55934+ }
55935+ read_unlock(&tasklist_lock);
55936+ rcu_read_unlock();
55937+#endif
55938+ return 1;
55939+}
55940+
55941+int
55942+gr_handle_chroot_nice(void)
55943+{
55944+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55945+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
55946+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
55947+ return -EPERM;
55948+ }
55949+#endif
55950+ return 0;
55951+}
55952+
55953+int
55954+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
55955+{
55956+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55957+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
55958+ && proc_is_chrooted(current)) {
55959+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
55960+ return -EACCES;
55961+ }
55962+#endif
55963+ return 0;
55964+}
55965+
55966+int
55967+gr_handle_chroot_rawio(const struct inode *inode)
55968+{
55969+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55970+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
55971+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
55972+ return 1;
55973+#endif
55974+ return 0;
55975+}
55976+
55977+int
55978+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
55979+{
55980+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55981+ struct task_struct *p;
55982+ int ret = 0;
55983+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
55984+ return ret;
55985+
55986+ read_lock(&tasklist_lock);
55987+ do_each_pid_task(pid, type, p) {
55988+ if (!have_same_root(current, p)) {
55989+ ret = 1;
55990+ goto out;
55991+ }
55992+ } while_each_pid_task(pid, type, p);
55993+out:
55994+ read_unlock(&tasklist_lock);
55995+ return ret;
55996+#endif
55997+ return 0;
55998+}
55999+
56000+int
56001+gr_pid_is_chrooted(struct task_struct *p)
56002+{
56003+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56004+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
56005+ return 0;
56006+
56007+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
56008+ !have_same_root(current, p)) {
56009+ return 1;
56010+ }
56011+#endif
56012+ return 0;
56013+}
56014+
56015+EXPORT_SYMBOL(gr_pid_is_chrooted);
56016+
56017+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
56018+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
56019+{
56020+ struct path path, currentroot;
56021+ int ret = 0;
56022+
56023+ path.dentry = (struct dentry *)u_dentry;
56024+ path.mnt = (struct vfsmount *)u_mnt;
56025+ get_fs_root(current->fs, &currentroot);
56026+ if (path_is_under(&path, &currentroot))
56027+ ret = 1;
56028+ path_put(&currentroot);
56029+
56030+ return ret;
56031+}
56032+#endif
56033+
56034+int
56035+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
56036+{
56037+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56038+ if (!grsec_enable_chroot_fchdir)
56039+ return 1;
56040+
56041+ if (!proc_is_chrooted(current))
56042+ return 1;
56043+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
56044+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
56045+ return 0;
56046+ }
56047+#endif
56048+ return 1;
56049+}
56050+
56051+int
56052+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56053+ const time_t shm_createtime)
56054+{
56055+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56056+ struct task_struct *p;
56057+ time_t starttime;
56058+
56059+ if (unlikely(!grsec_enable_chroot_shmat))
56060+ return 1;
56061+
56062+ if (likely(!proc_is_chrooted(current)))
56063+ return 1;
56064+
56065+ rcu_read_lock();
56066+ read_lock(&tasklist_lock);
56067+
56068+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
56069+ starttime = p->start_time.tv_sec;
56070+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
56071+ if (have_same_root(current, p)) {
56072+ goto allow;
56073+ } else {
56074+ read_unlock(&tasklist_lock);
56075+ rcu_read_unlock();
56076+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56077+ return 0;
56078+ }
56079+ }
56080+ /* creator exited, pid reuse, fall through to next check */
56081+ }
56082+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
56083+ if (unlikely(!have_same_root(current, p))) {
56084+ read_unlock(&tasklist_lock);
56085+ rcu_read_unlock();
56086+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56087+ return 0;
56088+ }
56089+ }
56090+
56091+allow:
56092+ read_unlock(&tasklist_lock);
56093+ rcu_read_unlock();
56094+#endif
56095+ return 1;
56096+}
56097+
56098+void
56099+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
56100+{
56101+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56102+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
56103+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
56104+#endif
56105+ return;
56106+}
56107+
56108+int
56109+gr_handle_chroot_mknod(const struct dentry *dentry,
56110+ const struct vfsmount *mnt, const int mode)
56111+{
56112+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56113+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
56114+ proc_is_chrooted(current)) {
56115+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
56116+ return -EPERM;
56117+ }
56118+#endif
56119+ return 0;
56120+}
56121+
56122+int
56123+gr_handle_chroot_mount(const struct dentry *dentry,
56124+ const struct vfsmount *mnt, const char *dev_name)
56125+{
56126+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56127+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
56128+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
56129+ return -EPERM;
56130+ }
56131+#endif
56132+ return 0;
56133+}
56134+
56135+int
56136+gr_handle_chroot_pivot(void)
56137+{
56138+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56139+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
56140+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
56141+ return -EPERM;
56142+ }
56143+#endif
56144+ return 0;
56145+}
56146+
56147+int
56148+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
56149+{
56150+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56151+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
56152+ !gr_is_outside_chroot(dentry, mnt)) {
56153+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
56154+ return -EPERM;
56155+ }
56156+#endif
56157+ return 0;
56158+}
56159+
56160+extern const char *captab_log[];
56161+extern int captab_log_entries;
56162+
56163+int
56164+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
56165+{
56166+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56167+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
56168+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56169+ if (cap_raised(chroot_caps, cap)) {
56170+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
56171+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
56172+ }
56173+ return 0;
56174+ }
56175+ }
56176+#endif
56177+ return 1;
56178+}
56179+
56180+int
56181+gr_chroot_is_capable(const int cap)
56182+{
56183+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56184+ return gr_task_chroot_is_capable(current, current_cred(), cap);
56185+#endif
56186+ return 1;
56187+}
56188+
56189+int
56190+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
56191+{
56192+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56193+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
56194+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56195+ if (cap_raised(chroot_caps, cap)) {
56196+ return 0;
56197+ }
56198+ }
56199+#endif
56200+ return 1;
56201+}
56202+
56203+int
56204+gr_chroot_is_capable_nolog(const int cap)
56205+{
56206+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56207+ return gr_task_chroot_is_capable_nolog(current, cap);
56208+#endif
56209+ return 1;
56210+}
56211+
56212+int
56213+gr_handle_chroot_sysctl(const int op)
56214+{
56215+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56216+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
56217+ proc_is_chrooted(current))
56218+ return -EACCES;
56219+#endif
56220+ return 0;
56221+}
56222+
56223+void
56224+gr_handle_chroot_chdir(struct path *path)
56225+{
56226+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56227+ if (grsec_enable_chroot_chdir)
56228+ set_fs_pwd(current->fs, path);
56229+#endif
56230+ return;
56231+}
56232+
56233+int
56234+gr_handle_chroot_chmod(const struct dentry *dentry,
56235+ const struct vfsmount *mnt, const int mode)
56236+{
56237+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56238+ /* allow chmod +s on directories, but not files */
56239+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
56240+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
56241+ proc_is_chrooted(current)) {
56242+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
56243+ return -EPERM;
56244+ }
56245+#endif
56246+ return 0;
56247+}
56248diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
56249new file mode 100644
56250index 0000000..213ad8b
56251--- /dev/null
56252+++ b/grsecurity/grsec_disabled.c
56253@@ -0,0 +1,437 @@
56254+#include <linux/kernel.h>
56255+#include <linux/module.h>
56256+#include <linux/sched.h>
56257+#include <linux/file.h>
56258+#include <linux/fs.h>
56259+#include <linux/kdev_t.h>
56260+#include <linux/net.h>
56261+#include <linux/in.h>
56262+#include <linux/ip.h>
56263+#include <linux/skbuff.h>
56264+#include <linux/sysctl.h>
56265+
56266+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56267+void
56268+pax_set_initial_flags(struct linux_binprm *bprm)
56269+{
56270+ return;
56271+}
56272+#endif
56273+
56274+#ifdef CONFIG_SYSCTL
56275+__u32
56276+gr_handle_sysctl(const struct ctl_table * table, const int op)
56277+{
56278+ return 0;
56279+}
56280+#endif
56281+
56282+#ifdef CONFIG_TASKSTATS
56283+int gr_is_taskstats_denied(int pid)
56284+{
56285+ return 0;
56286+}
56287+#endif
56288+
56289+int
56290+gr_acl_is_enabled(void)
56291+{
56292+ return 0;
56293+}
56294+
56295+void
56296+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
56297+{
56298+ return;
56299+}
56300+
56301+int
56302+gr_handle_rawio(const struct inode *inode)
56303+{
56304+ return 0;
56305+}
56306+
56307+void
56308+gr_acl_handle_psacct(struct task_struct *task, const long code)
56309+{
56310+ return;
56311+}
56312+
56313+int
56314+gr_handle_ptrace(struct task_struct *task, const long request)
56315+{
56316+ return 0;
56317+}
56318+
56319+int
56320+gr_handle_proc_ptrace(struct task_struct *task)
56321+{
56322+ return 0;
56323+}
56324+
56325+void
56326+gr_learn_resource(const struct task_struct *task,
56327+ const int res, const unsigned long wanted, const int gt)
56328+{
56329+ return;
56330+}
56331+
56332+int
56333+gr_set_acls(const int type)
56334+{
56335+ return 0;
56336+}
56337+
56338+int
56339+gr_check_hidden_task(const struct task_struct *tsk)
56340+{
56341+ return 0;
56342+}
56343+
56344+int
56345+gr_check_protected_task(const struct task_struct *task)
56346+{
56347+ return 0;
56348+}
56349+
56350+int
56351+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
56352+{
56353+ return 0;
56354+}
56355+
56356+void
56357+gr_copy_label(struct task_struct *tsk)
56358+{
56359+ return;
56360+}
56361+
56362+void
56363+gr_set_pax_flags(struct task_struct *task)
56364+{
56365+ return;
56366+}
56367+
56368+int
56369+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
56370+ const int unsafe_share)
56371+{
56372+ return 0;
56373+}
56374+
56375+void
56376+gr_handle_delete(const ino_t ino, const dev_t dev)
56377+{
56378+ return;
56379+}
56380+
56381+void
56382+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
56383+{
56384+ return;
56385+}
56386+
56387+void
56388+gr_handle_crash(struct task_struct *task, const int sig)
56389+{
56390+ return;
56391+}
56392+
56393+int
56394+gr_check_crash_exec(const struct file *filp)
56395+{
56396+ return 0;
56397+}
56398+
56399+int
56400+gr_check_crash_uid(const uid_t uid)
56401+{
56402+ return 0;
56403+}
56404+
56405+void
56406+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56407+ struct dentry *old_dentry,
56408+ struct dentry *new_dentry,
56409+ struct vfsmount *mnt, const __u8 replace)
56410+{
56411+ return;
56412+}
56413+
56414+int
56415+gr_search_socket(const int family, const int type, const int protocol)
56416+{
56417+ return 1;
56418+}
56419+
56420+int
56421+gr_search_connectbind(const int mode, const struct socket *sock,
56422+ const struct sockaddr_in *addr)
56423+{
56424+ return 0;
56425+}
56426+
56427+void
56428+gr_handle_alertkill(struct task_struct *task)
56429+{
56430+ return;
56431+}
56432+
56433+__u32
56434+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
56435+{
56436+ return 1;
56437+}
56438+
56439+__u32
56440+gr_acl_handle_hidden_file(const struct dentry * dentry,
56441+ const struct vfsmount * mnt)
56442+{
56443+ return 1;
56444+}
56445+
56446+__u32
56447+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
56448+ int acc_mode)
56449+{
56450+ return 1;
56451+}
56452+
56453+__u32
56454+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
56455+{
56456+ return 1;
56457+}
56458+
56459+__u32
56460+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
56461+{
56462+ return 1;
56463+}
56464+
56465+int
56466+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
56467+ unsigned int *vm_flags)
56468+{
56469+ return 1;
56470+}
56471+
56472+__u32
56473+gr_acl_handle_truncate(const struct dentry * dentry,
56474+ const struct vfsmount * mnt)
56475+{
56476+ return 1;
56477+}
56478+
56479+__u32
56480+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
56481+{
56482+ return 1;
56483+}
56484+
56485+__u32
56486+gr_acl_handle_access(const struct dentry * dentry,
56487+ const struct vfsmount * mnt, const int fmode)
56488+{
56489+ return 1;
56490+}
56491+
56492+__u32
56493+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
56494+ umode_t *mode)
56495+{
56496+ return 1;
56497+}
56498+
56499+__u32
56500+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
56501+{
56502+ return 1;
56503+}
56504+
56505+__u32
56506+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
56507+{
56508+ return 1;
56509+}
56510+
56511+void
56512+grsecurity_init(void)
56513+{
56514+ return;
56515+}
56516+
56517+umode_t gr_acl_umask(void)
56518+{
56519+ return 0;
56520+}
56521+
56522+__u32
56523+gr_acl_handle_mknod(const struct dentry * new_dentry,
56524+ const struct dentry * parent_dentry,
56525+ const struct vfsmount * parent_mnt,
56526+ const int mode)
56527+{
56528+ return 1;
56529+}
56530+
56531+__u32
56532+gr_acl_handle_mkdir(const struct dentry * new_dentry,
56533+ const struct dentry * parent_dentry,
56534+ const struct vfsmount * parent_mnt)
56535+{
56536+ return 1;
56537+}
56538+
56539+__u32
56540+gr_acl_handle_symlink(const struct dentry * new_dentry,
56541+ const struct dentry * parent_dentry,
56542+ const struct vfsmount * parent_mnt, const char *from)
56543+{
56544+ return 1;
56545+}
56546+
56547+__u32
56548+gr_acl_handle_link(const struct dentry * new_dentry,
56549+ const struct dentry * parent_dentry,
56550+ const struct vfsmount * parent_mnt,
56551+ const struct dentry * old_dentry,
56552+ const struct vfsmount * old_mnt, const char *to)
56553+{
56554+ return 1;
56555+}
56556+
56557+int
56558+gr_acl_handle_rename(const struct dentry *new_dentry,
56559+ const struct dentry *parent_dentry,
56560+ const struct vfsmount *parent_mnt,
56561+ const struct dentry *old_dentry,
56562+ const struct inode *old_parent_inode,
56563+ const struct vfsmount *old_mnt, const char *newname)
56564+{
56565+ return 0;
56566+}
56567+
56568+int
56569+gr_acl_handle_filldir(const struct file *file, const char *name,
56570+ const int namelen, const ino_t ino)
56571+{
56572+ return 1;
56573+}
56574+
56575+int
56576+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56577+ const time_t shm_createtime, const uid_t cuid, const int shmid)
56578+{
56579+ return 1;
56580+}
56581+
56582+int
56583+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
56584+{
56585+ return 0;
56586+}
56587+
56588+int
56589+gr_search_accept(const struct socket *sock)
56590+{
56591+ return 0;
56592+}
56593+
56594+int
56595+gr_search_listen(const struct socket *sock)
56596+{
56597+ return 0;
56598+}
56599+
56600+int
56601+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
56602+{
56603+ return 0;
56604+}
56605+
56606+__u32
56607+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
56608+{
56609+ return 1;
56610+}
56611+
56612+__u32
56613+gr_acl_handle_creat(const struct dentry * dentry,
56614+ const struct dentry * p_dentry,
56615+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
56616+ const int imode)
56617+{
56618+ return 1;
56619+}
56620+
56621+void
56622+gr_acl_handle_exit(void)
56623+{
56624+ return;
56625+}
56626+
56627+int
56628+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
56629+{
56630+ return 1;
56631+}
56632+
56633+void
56634+gr_set_role_label(const uid_t uid, const gid_t gid)
56635+{
56636+ return;
56637+}
56638+
56639+int
56640+gr_acl_handle_procpidmem(const struct task_struct *task)
56641+{
56642+ return 0;
56643+}
56644+
56645+int
56646+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
56647+{
56648+ return 0;
56649+}
56650+
56651+int
56652+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
56653+{
56654+ return 0;
56655+}
56656+
56657+void
56658+gr_set_kernel_label(struct task_struct *task)
56659+{
56660+ return;
56661+}
56662+
56663+int
56664+gr_check_user_change(int real, int effective, int fs)
56665+{
56666+ return 0;
56667+}
56668+
56669+int
56670+gr_check_group_change(int real, int effective, int fs)
56671+{
56672+ return 0;
56673+}
56674+
56675+int gr_acl_enable_at_secure(void)
56676+{
56677+ return 0;
56678+}
56679+
56680+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56681+{
56682+ return dentry->d_inode->i_sb->s_dev;
56683+}
56684+
56685+EXPORT_SYMBOL(gr_learn_resource);
56686+EXPORT_SYMBOL(gr_set_kernel_label);
56687+#ifdef CONFIG_SECURITY
56688+EXPORT_SYMBOL(gr_check_user_change);
56689+EXPORT_SYMBOL(gr_check_group_change);
56690+#endif
56691diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
56692new file mode 100644
56693index 0000000..abfa971
56694--- /dev/null
56695+++ b/grsecurity/grsec_exec.c
56696@@ -0,0 +1,174 @@
56697+#include <linux/kernel.h>
56698+#include <linux/sched.h>
56699+#include <linux/file.h>
56700+#include <linux/binfmts.h>
56701+#include <linux/fs.h>
56702+#include <linux/types.h>
56703+#include <linux/grdefs.h>
56704+#include <linux/grsecurity.h>
56705+#include <linux/grinternal.h>
56706+#include <linux/capability.h>
56707+#include <linux/module.h>
56708+
56709+#include <asm/uaccess.h>
56710+
56711+#ifdef CONFIG_GRKERNSEC_EXECLOG
56712+static char gr_exec_arg_buf[132];
56713+static DEFINE_MUTEX(gr_exec_arg_mutex);
56714+#endif
56715+
56716+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
56717+
56718+void
56719+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
56720+{
56721+#ifdef CONFIG_GRKERNSEC_EXECLOG
56722+ char *grarg = gr_exec_arg_buf;
56723+ unsigned int i, x, execlen = 0;
56724+ char c;
56725+
56726+ if (!((grsec_enable_execlog && grsec_enable_group &&
56727+ in_group_p(grsec_audit_gid))
56728+ || (grsec_enable_execlog && !grsec_enable_group)))
56729+ return;
56730+
56731+ mutex_lock(&gr_exec_arg_mutex);
56732+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
56733+
56734+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
56735+ const char __user *p;
56736+ unsigned int len;
56737+
56738+ p = get_user_arg_ptr(argv, i);
56739+ if (IS_ERR(p))
56740+ goto log;
56741+
56742+ len = strnlen_user(p, 128 - execlen);
56743+ if (len > 128 - execlen)
56744+ len = 128 - execlen;
56745+ else if (len > 0)
56746+ len--;
56747+ if (copy_from_user(grarg + execlen, p, len))
56748+ goto log;
56749+
56750+ /* rewrite unprintable characters */
56751+ for (x = 0; x < len; x++) {
56752+ c = *(grarg + execlen + x);
56753+ if (c < 32 || c > 126)
56754+ *(grarg + execlen + x) = ' ';
56755+ }
56756+
56757+ execlen += len;
56758+ *(grarg + execlen) = ' ';
56759+ *(grarg + execlen + 1) = '\0';
56760+ execlen++;
56761+ }
56762+
56763+ log:
56764+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
56765+ bprm->file->f_path.mnt, grarg);
56766+ mutex_unlock(&gr_exec_arg_mutex);
56767+#endif
56768+ return;
56769+}
56770+
56771+#ifdef CONFIG_GRKERNSEC
56772+extern int gr_acl_is_capable(const int cap);
56773+extern int gr_acl_is_capable_nolog(const int cap);
56774+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
56775+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
56776+extern int gr_chroot_is_capable(const int cap);
56777+extern int gr_chroot_is_capable_nolog(const int cap);
56778+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
56779+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
56780+#endif
56781+
56782+const char *captab_log[] = {
56783+ "CAP_CHOWN",
56784+ "CAP_DAC_OVERRIDE",
56785+ "CAP_DAC_READ_SEARCH",
56786+ "CAP_FOWNER",
56787+ "CAP_FSETID",
56788+ "CAP_KILL",
56789+ "CAP_SETGID",
56790+ "CAP_SETUID",
56791+ "CAP_SETPCAP",
56792+ "CAP_LINUX_IMMUTABLE",
56793+ "CAP_NET_BIND_SERVICE",
56794+ "CAP_NET_BROADCAST",
56795+ "CAP_NET_ADMIN",
56796+ "CAP_NET_RAW",
56797+ "CAP_IPC_LOCK",
56798+ "CAP_IPC_OWNER",
56799+ "CAP_SYS_MODULE",
56800+ "CAP_SYS_RAWIO",
56801+ "CAP_SYS_CHROOT",
56802+ "CAP_SYS_PTRACE",
56803+ "CAP_SYS_PACCT",
56804+ "CAP_SYS_ADMIN",
56805+ "CAP_SYS_BOOT",
56806+ "CAP_SYS_NICE",
56807+ "CAP_SYS_RESOURCE",
56808+ "CAP_SYS_TIME",
56809+ "CAP_SYS_TTY_CONFIG",
56810+ "CAP_MKNOD",
56811+ "CAP_LEASE",
56812+ "CAP_AUDIT_WRITE",
56813+ "CAP_AUDIT_CONTROL",
56814+ "CAP_SETFCAP",
56815+ "CAP_MAC_OVERRIDE",
56816+ "CAP_MAC_ADMIN",
56817+ "CAP_SYSLOG",
56818+ "CAP_WAKE_ALARM"
56819+};
56820+
56821+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
56822+
56823+int gr_is_capable(const int cap)
56824+{
56825+#ifdef CONFIG_GRKERNSEC
56826+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
56827+ return 1;
56828+ return 0;
56829+#else
56830+ return 1;
56831+#endif
56832+}
56833+
56834+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
56835+{
56836+#ifdef CONFIG_GRKERNSEC
56837+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
56838+ return 1;
56839+ return 0;
56840+#else
56841+ return 1;
56842+#endif
56843+}
56844+
56845+int gr_is_capable_nolog(const int cap)
56846+{
56847+#ifdef CONFIG_GRKERNSEC
56848+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
56849+ return 1;
56850+ return 0;
56851+#else
56852+ return 1;
56853+#endif
56854+}
56855+
56856+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
56857+{
56858+#ifdef CONFIG_GRKERNSEC
56859+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
56860+ return 1;
56861+ return 0;
56862+#else
56863+ return 1;
56864+#endif
56865+}
56866+
56867+EXPORT_SYMBOL(gr_is_capable);
56868+EXPORT_SYMBOL(gr_is_capable_nolog);
56869+EXPORT_SYMBOL(gr_task_is_capable);
56870+EXPORT_SYMBOL(gr_task_is_capable_nolog);
56871diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
56872new file mode 100644
56873index 0000000..d3ee748
56874--- /dev/null
56875+++ b/grsecurity/grsec_fifo.c
56876@@ -0,0 +1,24 @@
56877+#include <linux/kernel.h>
56878+#include <linux/sched.h>
56879+#include <linux/fs.h>
56880+#include <linux/file.h>
56881+#include <linux/grinternal.h>
56882+
56883+int
56884+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
56885+ const struct dentry *dir, const int flag, const int acc_mode)
56886+{
56887+#ifdef CONFIG_GRKERNSEC_FIFO
56888+ const struct cred *cred = current_cred();
56889+
56890+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
56891+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
56892+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
56893+ (cred->fsuid != dentry->d_inode->i_uid)) {
56894+ if (!inode_permission(dentry->d_inode, acc_mode))
56895+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
56896+ return -EACCES;
56897+ }
56898+#endif
56899+ return 0;
56900+}
56901diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
56902new file mode 100644
56903index 0000000..8ca18bf
56904--- /dev/null
56905+++ b/grsecurity/grsec_fork.c
56906@@ -0,0 +1,23 @@
56907+#include <linux/kernel.h>
56908+#include <linux/sched.h>
56909+#include <linux/grsecurity.h>
56910+#include <linux/grinternal.h>
56911+#include <linux/errno.h>
56912+
56913+void
56914+gr_log_forkfail(const int retval)
56915+{
56916+#ifdef CONFIG_GRKERNSEC_FORKFAIL
56917+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
56918+ switch (retval) {
56919+ case -EAGAIN:
56920+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
56921+ break;
56922+ case -ENOMEM:
56923+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
56924+ break;
56925+ }
56926+ }
56927+#endif
56928+ return;
56929+}
56930diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
56931new file mode 100644
56932index 0000000..01ddde4
56933--- /dev/null
56934+++ b/grsecurity/grsec_init.c
56935@@ -0,0 +1,277 @@
56936+#include <linux/kernel.h>
56937+#include <linux/sched.h>
56938+#include <linux/mm.h>
56939+#include <linux/gracl.h>
56940+#include <linux/slab.h>
56941+#include <linux/vmalloc.h>
56942+#include <linux/percpu.h>
56943+#include <linux/module.h>
56944+
56945+int grsec_enable_ptrace_readexec;
56946+int grsec_enable_setxid;
56947+int grsec_enable_brute;
56948+int grsec_enable_link;
56949+int grsec_enable_dmesg;
56950+int grsec_enable_harden_ptrace;
56951+int grsec_enable_fifo;
56952+int grsec_enable_execlog;
56953+int grsec_enable_signal;
56954+int grsec_enable_forkfail;
56955+int grsec_enable_audit_ptrace;
56956+int grsec_enable_time;
56957+int grsec_enable_audit_textrel;
56958+int grsec_enable_group;
56959+int grsec_audit_gid;
56960+int grsec_enable_chdir;
56961+int grsec_enable_mount;
56962+int grsec_enable_rofs;
56963+int grsec_enable_chroot_findtask;
56964+int grsec_enable_chroot_mount;
56965+int grsec_enable_chroot_shmat;
56966+int grsec_enable_chroot_fchdir;
56967+int grsec_enable_chroot_double;
56968+int grsec_enable_chroot_pivot;
56969+int grsec_enable_chroot_chdir;
56970+int grsec_enable_chroot_chmod;
56971+int grsec_enable_chroot_mknod;
56972+int grsec_enable_chroot_nice;
56973+int grsec_enable_chroot_execlog;
56974+int grsec_enable_chroot_caps;
56975+int grsec_enable_chroot_sysctl;
56976+int grsec_enable_chroot_unix;
56977+int grsec_enable_tpe;
56978+int grsec_tpe_gid;
56979+int grsec_enable_blackhole;
56980+#ifdef CONFIG_IPV6_MODULE
56981+EXPORT_SYMBOL(grsec_enable_blackhole);
56982+#endif
56983+int grsec_lastack_retries;
56984+int grsec_enable_tpe_all;
56985+int grsec_enable_tpe_invert;
56986+int grsec_enable_socket_all;
56987+int grsec_socket_all_gid;
56988+int grsec_enable_socket_client;
56989+int grsec_socket_client_gid;
56990+int grsec_enable_socket_server;
56991+int grsec_socket_server_gid;
56992+int grsec_resource_logging;
56993+int grsec_disable_privio;
56994+int grsec_enable_log_rwxmaps;
56995+int grsec_lock;
56996+
56997+DEFINE_SPINLOCK(grsec_alert_lock);
56998+unsigned long grsec_alert_wtime = 0;
56999+unsigned long grsec_alert_fyet = 0;
57000+
57001+DEFINE_SPINLOCK(grsec_audit_lock);
57002+
57003+DEFINE_RWLOCK(grsec_exec_file_lock);
57004+
57005+char *gr_shared_page[4];
57006+
57007+char *gr_alert_log_fmt;
57008+char *gr_audit_log_fmt;
57009+char *gr_alert_log_buf;
57010+char *gr_audit_log_buf;
57011+
57012+extern struct gr_arg *gr_usermode;
57013+extern unsigned char *gr_system_salt;
57014+extern unsigned char *gr_system_sum;
57015+
57016+void __init
57017+grsecurity_init(void)
57018+{
57019+ int j;
57020+ /* create the per-cpu shared pages */
57021+
57022+#ifdef CONFIG_X86
57023+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
57024+#endif
57025+
57026+ for (j = 0; j < 4; j++) {
57027+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
57028+ if (gr_shared_page[j] == NULL) {
57029+ panic("Unable to allocate grsecurity shared page");
57030+ return;
57031+ }
57032+ }
57033+
57034+ /* allocate log buffers */
57035+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
57036+ if (!gr_alert_log_fmt) {
57037+ panic("Unable to allocate grsecurity alert log format buffer");
57038+ return;
57039+ }
57040+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
57041+ if (!gr_audit_log_fmt) {
57042+ panic("Unable to allocate grsecurity audit log format buffer");
57043+ return;
57044+ }
57045+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57046+ if (!gr_alert_log_buf) {
57047+ panic("Unable to allocate grsecurity alert log buffer");
57048+ return;
57049+ }
57050+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57051+ if (!gr_audit_log_buf) {
57052+ panic("Unable to allocate grsecurity audit log buffer");
57053+ return;
57054+ }
57055+
57056+ /* allocate memory for authentication structure */
57057+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
57058+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
57059+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
57060+
57061+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
57062+ panic("Unable to allocate grsecurity authentication structure");
57063+ return;
57064+ }
57065+
57066+
57067+#ifdef CONFIG_GRKERNSEC_IO
57068+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
57069+ grsec_disable_privio = 1;
57070+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57071+ grsec_disable_privio = 1;
57072+#else
57073+ grsec_disable_privio = 0;
57074+#endif
57075+#endif
57076+
57077+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
57078+ /* for backward compatibility, tpe_invert always defaults to on if
57079+ enabled in the kernel
57080+ */
57081+ grsec_enable_tpe_invert = 1;
57082+#endif
57083+
57084+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57085+#ifndef CONFIG_GRKERNSEC_SYSCTL
57086+ grsec_lock = 1;
57087+#endif
57088+
57089+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57090+ grsec_enable_audit_textrel = 1;
57091+#endif
57092+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57093+ grsec_enable_log_rwxmaps = 1;
57094+#endif
57095+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
57096+ grsec_enable_group = 1;
57097+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
57098+#endif
57099+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57100+ grsec_enable_ptrace_readexec = 1;
57101+#endif
57102+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57103+ grsec_enable_chdir = 1;
57104+#endif
57105+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57106+ grsec_enable_harden_ptrace = 1;
57107+#endif
57108+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57109+ grsec_enable_mount = 1;
57110+#endif
57111+#ifdef CONFIG_GRKERNSEC_LINK
57112+ grsec_enable_link = 1;
57113+#endif
57114+#ifdef CONFIG_GRKERNSEC_BRUTE
57115+ grsec_enable_brute = 1;
57116+#endif
57117+#ifdef CONFIG_GRKERNSEC_DMESG
57118+ grsec_enable_dmesg = 1;
57119+#endif
57120+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
57121+ grsec_enable_blackhole = 1;
57122+ grsec_lastack_retries = 4;
57123+#endif
57124+#ifdef CONFIG_GRKERNSEC_FIFO
57125+ grsec_enable_fifo = 1;
57126+#endif
57127+#ifdef CONFIG_GRKERNSEC_EXECLOG
57128+ grsec_enable_execlog = 1;
57129+#endif
57130+#ifdef CONFIG_GRKERNSEC_SETXID
57131+ grsec_enable_setxid = 1;
57132+#endif
57133+#ifdef CONFIG_GRKERNSEC_SIGNAL
57134+ grsec_enable_signal = 1;
57135+#endif
57136+#ifdef CONFIG_GRKERNSEC_FORKFAIL
57137+ grsec_enable_forkfail = 1;
57138+#endif
57139+#ifdef CONFIG_GRKERNSEC_TIME
57140+ grsec_enable_time = 1;
57141+#endif
57142+#ifdef CONFIG_GRKERNSEC_RESLOG
57143+ grsec_resource_logging = 1;
57144+#endif
57145+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57146+ grsec_enable_chroot_findtask = 1;
57147+#endif
57148+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57149+ grsec_enable_chroot_unix = 1;
57150+#endif
57151+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57152+ grsec_enable_chroot_mount = 1;
57153+#endif
57154+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57155+ grsec_enable_chroot_fchdir = 1;
57156+#endif
57157+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57158+ grsec_enable_chroot_shmat = 1;
57159+#endif
57160+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57161+ grsec_enable_audit_ptrace = 1;
57162+#endif
57163+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57164+ grsec_enable_chroot_double = 1;
57165+#endif
57166+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57167+ grsec_enable_chroot_pivot = 1;
57168+#endif
57169+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57170+ grsec_enable_chroot_chdir = 1;
57171+#endif
57172+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57173+ grsec_enable_chroot_chmod = 1;
57174+#endif
57175+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57176+ grsec_enable_chroot_mknod = 1;
57177+#endif
57178+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57179+ grsec_enable_chroot_nice = 1;
57180+#endif
57181+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57182+ grsec_enable_chroot_execlog = 1;
57183+#endif
57184+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57185+ grsec_enable_chroot_caps = 1;
57186+#endif
57187+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57188+ grsec_enable_chroot_sysctl = 1;
57189+#endif
57190+#ifdef CONFIG_GRKERNSEC_TPE
57191+ grsec_enable_tpe = 1;
57192+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
57193+#ifdef CONFIG_GRKERNSEC_TPE_ALL
57194+ grsec_enable_tpe_all = 1;
57195+#endif
57196+#endif
57197+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
57198+ grsec_enable_socket_all = 1;
57199+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
57200+#endif
57201+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
57202+ grsec_enable_socket_client = 1;
57203+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
57204+#endif
57205+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57206+ grsec_enable_socket_server = 1;
57207+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
57208+#endif
57209+#endif
57210+
57211+ return;
57212+}
57213diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
57214new file mode 100644
57215index 0000000..3efe141
57216--- /dev/null
57217+++ b/grsecurity/grsec_link.c
57218@@ -0,0 +1,43 @@
57219+#include <linux/kernel.h>
57220+#include <linux/sched.h>
57221+#include <linux/fs.h>
57222+#include <linux/file.h>
57223+#include <linux/grinternal.h>
57224+
57225+int
57226+gr_handle_follow_link(const struct inode *parent,
57227+ const struct inode *inode,
57228+ const struct dentry *dentry, const struct vfsmount *mnt)
57229+{
57230+#ifdef CONFIG_GRKERNSEC_LINK
57231+ const struct cred *cred = current_cred();
57232+
57233+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
57234+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
57235+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
57236+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
57237+ return -EACCES;
57238+ }
57239+#endif
57240+ return 0;
57241+}
57242+
57243+int
57244+gr_handle_hardlink(const struct dentry *dentry,
57245+ const struct vfsmount *mnt,
57246+ struct inode *inode, const int mode, const char *to)
57247+{
57248+#ifdef CONFIG_GRKERNSEC_LINK
57249+ const struct cred *cred = current_cred();
57250+
57251+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
57252+ (!S_ISREG(mode) || (mode & S_ISUID) ||
57253+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
57254+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
57255+ !capable(CAP_FOWNER) && cred->uid) {
57256+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
57257+ return -EPERM;
57258+ }
57259+#endif
57260+ return 0;
57261+}
57262diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
57263new file mode 100644
57264index 0000000..a45d2e9
57265--- /dev/null
57266+++ b/grsecurity/grsec_log.c
57267@@ -0,0 +1,322 @@
57268+#include <linux/kernel.h>
57269+#include <linux/sched.h>
57270+#include <linux/file.h>
57271+#include <linux/tty.h>
57272+#include <linux/fs.h>
57273+#include <linux/grinternal.h>
57274+
57275+#ifdef CONFIG_TREE_PREEMPT_RCU
57276+#define DISABLE_PREEMPT() preempt_disable()
57277+#define ENABLE_PREEMPT() preempt_enable()
57278+#else
57279+#define DISABLE_PREEMPT()
57280+#define ENABLE_PREEMPT()
57281+#endif
57282+
57283+#define BEGIN_LOCKS(x) \
57284+ DISABLE_PREEMPT(); \
57285+ rcu_read_lock(); \
57286+ read_lock(&tasklist_lock); \
57287+ read_lock(&grsec_exec_file_lock); \
57288+ if (x != GR_DO_AUDIT) \
57289+ spin_lock(&grsec_alert_lock); \
57290+ else \
57291+ spin_lock(&grsec_audit_lock)
57292+
57293+#define END_LOCKS(x) \
57294+ if (x != GR_DO_AUDIT) \
57295+ spin_unlock(&grsec_alert_lock); \
57296+ else \
57297+ spin_unlock(&grsec_audit_lock); \
57298+ read_unlock(&grsec_exec_file_lock); \
57299+ read_unlock(&tasklist_lock); \
57300+ rcu_read_unlock(); \
57301+ ENABLE_PREEMPT(); \
57302+ if (x == GR_DONT_AUDIT) \
57303+ gr_handle_alertkill(current)
57304+
57305+enum {
57306+ FLOODING,
57307+ NO_FLOODING
57308+};
57309+
57310+extern char *gr_alert_log_fmt;
57311+extern char *gr_audit_log_fmt;
57312+extern char *gr_alert_log_buf;
57313+extern char *gr_audit_log_buf;
57314+
57315+static int gr_log_start(int audit)
57316+{
57317+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
57318+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
57319+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57320+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
57321+ unsigned long curr_secs = get_seconds();
57322+
57323+ if (audit == GR_DO_AUDIT)
57324+ goto set_fmt;
57325+
57326+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
57327+ grsec_alert_wtime = curr_secs;
57328+ grsec_alert_fyet = 0;
57329+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
57330+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
57331+ grsec_alert_fyet++;
57332+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
57333+ grsec_alert_wtime = curr_secs;
57334+ grsec_alert_fyet++;
57335+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
57336+ return FLOODING;
57337+ }
57338+ else return FLOODING;
57339+
57340+set_fmt:
57341+#endif
57342+ memset(buf, 0, PAGE_SIZE);
57343+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
57344+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
57345+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57346+ } else if (current->signal->curr_ip) {
57347+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
57348+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
57349+ } else if (gr_acl_is_enabled()) {
57350+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
57351+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57352+ } else {
57353+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
57354+ strcpy(buf, fmt);
57355+ }
57356+
57357+ return NO_FLOODING;
57358+}
57359+
57360+static void gr_log_middle(int audit, const char *msg, va_list ap)
57361+ __attribute__ ((format (printf, 2, 0)));
57362+
57363+static void gr_log_middle(int audit, const char *msg, va_list ap)
57364+{
57365+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57366+ unsigned int len = strlen(buf);
57367+
57368+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57369+
57370+ return;
57371+}
57372+
57373+static void gr_log_middle_varargs(int audit, const char *msg, ...)
57374+ __attribute__ ((format (printf, 2, 3)));
57375+
57376+static void gr_log_middle_varargs(int audit, const char *msg, ...)
57377+{
57378+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57379+ unsigned int len = strlen(buf);
57380+ va_list ap;
57381+
57382+ va_start(ap, msg);
57383+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57384+ va_end(ap);
57385+
57386+ return;
57387+}
57388+
57389+static void gr_log_end(int audit, int append_default)
57390+{
57391+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57392+
57393+ if (append_default) {
57394+ unsigned int len = strlen(buf);
57395+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
57396+ }
57397+
57398+ printk("%s\n", buf);
57399+
57400+ return;
57401+}
57402+
57403+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
57404+{
57405+ int logtype;
57406+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
57407+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
57408+ void *voidptr = NULL;
57409+ int num1 = 0, num2 = 0;
57410+ unsigned long ulong1 = 0, ulong2 = 0;
57411+ struct dentry *dentry = NULL;
57412+ struct vfsmount *mnt = NULL;
57413+ struct file *file = NULL;
57414+ struct task_struct *task = NULL;
57415+ const struct cred *cred, *pcred;
57416+ va_list ap;
57417+
57418+ BEGIN_LOCKS(audit);
57419+ logtype = gr_log_start(audit);
57420+ if (logtype == FLOODING) {
57421+ END_LOCKS(audit);
57422+ return;
57423+ }
57424+ va_start(ap, argtypes);
57425+ switch (argtypes) {
57426+ case GR_TTYSNIFF:
57427+ task = va_arg(ap, struct task_struct *);
57428+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
57429+ break;
57430+ case GR_SYSCTL_HIDDEN:
57431+ str1 = va_arg(ap, char *);
57432+ gr_log_middle_varargs(audit, msg, result, str1);
57433+ break;
57434+ case GR_RBAC:
57435+ dentry = va_arg(ap, struct dentry *);
57436+ mnt = va_arg(ap, struct vfsmount *);
57437+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
57438+ break;
57439+ case GR_RBAC_STR:
57440+ dentry = va_arg(ap, struct dentry *);
57441+ mnt = va_arg(ap, struct vfsmount *);
57442+ str1 = va_arg(ap, char *);
57443+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
57444+ break;
57445+ case GR_STR_RBAC:
57446+ str1 = va_arg(ap, char *);
57447+ dentry = va_arg(ap, struct dentry *);
57448+ mnt = va_arg(ap, struct vfsmount *);
57449+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
57450+ break;
57451+ case GR_RBAC_MODE2:
57452+ dentry = va_arg(ap, struct dentry *);
57453+ mnt = va_arg(ap, struct vfsmount *);
57454+ str1 = va_arg(ap, char *);
57455+ str2 = va_arg(ap, char *);
57456+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
57457+ break;
57458+ case GR_RBAC_MODE3:
57459+ dentry = va_arg(ap, struct dentry *);
57460+ mnt = va_arg(ap, struct vfsmount *);
57461+ str1 = va_arg(ap, char *);
57462+ str2 = va_arg(ap, char *);
57463+ str3 = va_arg(ap, char *);
57464+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
57465+ break;
57466+ case GR_FILENAME:
57467+ dentry = va_arg(ap, struct dentry *);
57468+ mnt = va_arg(ap, struct vfsmount *);
57469+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
57470+ break;
57471+ case GR_STR_FILENAME:
57472+ str1 = va_arg(ap, char *);
57473+ dentry = va_arg(ap, struct dentry *);
57474+ mnt = va_arg(ap, struct vfsmount *);
57475+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
57476+ break;
57477+ case GR_FILENAME_STR:
57478+ dentry = va_arg(ap, struct dentry *);
57479+ mnt = va_arg(ap, struct vfsmount *);
57480+ str1 = va_arg(ap, char *);
57481+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
57482+ break;
57483+ case GR_FILENAME_TWO_INT:
57484+ dentry = va_arg(ap, struct dentry *);
57485+ mnt = va_arg(ap, struct vfsmount *);
57486+ num1 = va_arg(ap, int);
57487+ num2 = va_arg(ap, int);
57488+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
57489+ break;
57490+ case GR_FILENAME_TWO_INT_STR:
57491+ dentry = va_arg(ap, struct dentry *);
57492+ mnt = va_arg(ap, struct vfsmount *);
57493+ num1 = va_arg(ap, int);
57494+ num2 = va_arg(ap, int);
57495+ str1 = va_arg(ap, char *);
57496+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
57497+ break;
57498+ case GR_TEXTREL:
57499+ file = va_arg(ap, struct file *);
57500+ ulong1 = va_arg(ap, unsigned long);
57501+ ulong2 = va_arg(ap, unsigned long);
57502+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
57503+ break;
57504+ case GR_PTRACE:
57505+ task = va_arg(ap, struct task_struct *);
57506+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
57507+ break;
57508+ case GR_RESOURCE:
57509+ task = va_arg(ap, struct task_struct *);
57510+ cred = __task_cred(task);
57511+ pcred = __task_cred(task->real_parent);
57512+ ulong1 = va_arg(ap, unsigned long);
57513+ str1 = va_arg(ap, char *);
57514+ ulong2 = va_arg(ap, unsigned long);
57515+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57516+ break;
57517+ case GR_CAP:
57518+ task = va_arg(ap, struct task_struct *);
57519+ cred = __task_cred(task);
57520+ pcred = __task_cred(task->real_parent);
57521+ str1 = va_arg(ap, char *);
57522+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57523+ break;
57524+ case GR_SIG:
57525+ str1 = va_arg(ap, char *);
57526+ voidptr = va_arg(ap, void *);
57527+ gr_log_middle_varargs(audit, msg, str1, voidptr);
57528+ break;
57529+ case GR_SIG2:
57530+ task = va_arg(ap, struct task_struct *);
57531+ cred = __task_cred(task);
57532+ pcred = __task_cred(task->real_parent);
57533+ num1 = va_arg(ap, int);
57534+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57535+ break;
57536+ case GR_CRASH1:
57537+ task = va_arg(ap, struct task_struct *);
57538+ cred = __task_cred(task);
57539+ pcred = __task_cred(task->real_parent);
57540+ ulong1 = va_arg(ap, unsigned long);
57541+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
57542+ break;
57543+ case GR_CRASH2:
57544+ task = va_arg(ap, struct task_struct *);
57545+ cred = __task_cred(task);
57546+ pcred = __task_cred(task->real_parent);
57547+ ulong1 = va_arg(ap, unsigned long);
57548+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
57549+ break;
57550+ case GR_RWXMAP:
57551+ file = va_arg(ap, struct file *);
57552+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
57553+ break;
57554+ case GR_PSACCT:
57555+ {
57556+ unsigned int wday, cday;
57557+ __u8 whr, chr;
57558+ __u8 wmin, cmin;
57559+ __u8 wsec, csec;
57560+ char cur_tty[64] = { 0 };
57561+ char parent_tty[64] = { 0 };
57562+
57563+ task = va_arg(ap, struct task_struct *);
57564+ wday = va_arg(ap, unsigned int);
57565+ cday = va_arg(ap, unsigned int);
57566+ whr = va_arg(ap, int);
57567+ chr = va_arg(ap, int);
57568+ wmin = va_arg(ap, int);
57569+ cmin = va_arg(ap, int);
57570+ wsec = va_arg(ap, int);
57571+ csec = va_arg(ap, int);
57572+ ulong1 = va_arg(ap, unsigned long);
57573+ cred = __task_cred(task);
57574+ pcred = __task_cred(task->real_parent);
57575+
57576+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57577+ }
57578+ break;
57579+ default:
57580+ gr_log_middle(audit, msg, ap);
57581+ }
57582+ va_end(ap);
57583+ // these don't need DEFAULTSECARGS printed on the end
57584+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
57585+ gr_log_end(audit, 0);
57586+ else
57587+ gr_log_end(audit, 1);
57588+ END_LOCKS(audit);
57589+}
57590diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
57591new file mode 100644
57592index 0000000..f536303
57593--- /dev/null
57594+++ b/grsecurity/grsec_mem.c
57595@@ -0,0 +1,40 @@
57596+#include <linux/kernel.h>
57597+#include <linux/sched.h>
57598+#include <linux/mm.h>
57599+#include <linux/mman.h>
57600+#include <linux/grinternal.h>
57601+
57602+void
57603+gr_handle_ioperm(void)
57604+{
57605+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
57606+ return;
57607+}
57608+
57609+void
57610+gr_handle_iopl(void)
57611+{
57612+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
57613+ return;
57614+}
57615+
57616+void
57617+gr_handle_mem_readwrite(u64 from, u64 to)
57618+{
57619+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
57620+ return;
57621+}
57622+
57623+void
57624+gr_handle_vm86(void)
57625+{
57626+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
57627+ return;
57628+}
57629+
57630+void
57631+gr_log_badprocpid(const char *entry)
57632+{
57633+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
57634+ return;
57635+}
57636diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
57637new file mode 100644
57638index 0000000..2131422
57639--- /dev/null
57640+++ b/grsecurity/grsec_mount.c
57641@@ -0,0 +1,62 @@
57642+#include <linux/kernel.h>
57643+#include <linux/sched.h>
57644+#include <linux/mount.h>
57645+#include <linux/grsecurity.h>
57646+#include <linux/grinternal.h>
57647+
57648+void
57649+gr_log_remount(const char *devname, const int retval)
57650+{
57651+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57652+ if (grsec_enable_mount && (retval >= 0))
57653+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
57654+#endif
57655+ return;
57656+}
57657+
57658+void
57659+gr_log_unmount(const char *devname, const int retval)
57660+{
57661+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57662+ if (grsec_enable_mount && (retval >= 0))
57663+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
57664+#endif
57665+ return;
57666+}
57667+
57668+void
57669+gr_log_mount(const char *from, const char *to, const int retval)
57670+{
57671+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57672+ if (grsec_enable_mount && (retval >= 0))
57673+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
57674+#endif
57675+ return;
57676+}
57677+
57678+int
57679+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
57680+{
57681+#ifdef CONFIG_GRKERNSEC_ROFS
57682+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
57683+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
57684+ return -EPERM;
57685+ } else
57686+ return 0;
57687+#endif
57688+ return 0;
57689+}
57690+
57691+int
57692+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
57693+{
57694+#ifdef CONFIG_GRKERNSEC_ROFS
57695+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
57696+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
57697+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
57698+ return -EPERM;
57699+ } else
57700+ return 0;
57701+#endif
57702+ return 0;
57703+}
57704diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
57705new file mode 100644
57706index 0000000..a3b12a0
57707--- /dev/null
57708+++ b/grsecurity/grsec_pax.c
57709@@ -0,0 +1,36 @@
57710+#include <linux/kernel.h>
57711+#include <linux/sched.h>
57712+#include <linux/mm.h>
57713+#include <linux/file.h>
57714+#include <linux/grinternal.h>
57715+#include <linux/grsecurity.h>
57716+
57717+void
57718+gr_log_textrel(struct vm_area_struct * vma)
57719+{
57720+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57721+ if (grsec_enable_audit_textrel)
57722+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
57723+#endif
57724+ return;
57725+}
57726+
57727+void
57728+gr_log_rwxmmap(struct file *file)
57729+{
57730+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57731+ if (grsec_enable_log_rwxmaps)
57732+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
57733+#endif
57734+ return;
57735+}
57736+
57737+void
57738+gr_log_rwxmprotect(struct file *file)
57739+{
57740+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57741+ if (grsec_enable_log_rwxmaps)
57742+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
57743+#endif
57744+ return;
57745+}
57746diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
57747new file mode 100644
57748index 0000000..f7f29aa
57749--- /dev/null
57750+++ b/grsecurity/grsec_ptrace.c
57751@@ -0,0 +1,30 @@
57752+#include <linux/kernel.h>
57753+#include <linux/sched.h>
57754+#include <linux/grinternal.h>
57755+#include <linux/security.h>
57756+
57757+void
57758+gr_audit_ptrace(struct task_struct *task)
57759+{
57760+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57761+ if (grsec_enable_audit_ptrace)
57762+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
57763+#endif
57764+ return;
57765+}
57766+
57767+int
57768+gr_ptrace_readexec(struct file *file, int unsafe_flags)
57769+{
57770+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57771+ const struct dentry *dentry = file->f_path.dentry;
57772+ const struct vfsmount *mnt = file->f_path.mnt;
57773+
57774+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
57775+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
57776+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
57777+ return -EACCES;
57778+ }
57779+#endif
57780+ return 0;
57781+}
57782diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
57783new file mode 100644
57784index 0000000..7a5b2de
57785--- /dev/null
57786+++ b/grsecurity/grsec_sig.c
57787@@ -0,0 +1,207 @@
57788+#include <linux/kernel.h>
57789+#include <linux/sched.h>
57790+#include <linux/delay.h>
57791+#include <linux/grsecurity.h>
57792+#include <linux/grinternal.h>
57793+#include <linux/hardirq.h>
57794+
57795+char *signames[] = {
57796+ [SIGSEGV] = "Segmentation fault",
57797+ [SIGILL] = "Illegal instruction",
57798+ [SIGABRT] = "Abort",
57799+ [SIGBUS] = "Invalid alignment/Bus error"
57800+};
57801+
57802+void
57803+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
57804+{
57805+#ifdef CONFIG_GRKERNSEC_SIGNAL
57806+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
57807+ (sig == SIGABRT) || (sig == SIGBUS))) {
57808+ if (t->pid == current->pid) {
57809+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
57810+ } else {
57811+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
57812+ }
57813+ }
57814+#endif
57815+ return;
57816+}
57817+
57818+int
57819+gr_handle_signal(const struct task_struct *p, const int sig)
57820+{
57821+#ifdef CONFIG_GRKERNSEC
57822+ /* ignore the 0 signal for protected task checks */
57823+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
57824+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
57825+ return -EPERM;
57826+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
57827+ return -EPERM;
57828+ }
57829+#endif
57830+ return 0;
57831+}
57832+
57833+#ifdef CONFIG_GRKERNSEC
57834+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
57835+
57836+int gr_fake_force_sig(int sig, struct task_struct *t)
57837+{
57838+ unsigned long int flags;
57839+ int ret, blocked, ignored;
57840+ struct k_sigaction *action;
57841+
57842+ spin_lock_irqsave(&t->sighand->siglock, flags);
57843+ action = &t->sighand->action[sig-1];
57844+ ignored = action->sa.sa_handler == SIG_IGN;
57845+ blocked = sigismember(&t->blocked, sig);
57846+ if (blocked || ignored) {
57847+ action->sa.sa_handler = SIG_DFL;
57848+ if (blocked) {
57849+ sigdelset(&t->blocked, sig);
57850+ recalc_sigpending_and_wake(t);
57851+ }
57852+ }
57853+ if (action->sa.sa_handler == SIG_DFL)
57854+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
57855+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
57856+
57857+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
57858+
57859+ return ret;
57860+}
57861+#endif
57862+
57863+#ifdef CONFIG_GRKERNSEC_BRUTE
57864+#define GR_USER_BAN_TIME (15 * 60)
57865+
57866+static int __get_dumpable(unsigned long mm_flags)
57867+{
57868+ int ret;
57869+
57870+ ret = mm_flags & MMF_DUMPABLE_MASK;
57871+ return (ret >= 2) ? 2 : ret;
57872+}
57873+#endif
57874+
57875+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
57876+{
57877+#ifdef CONFIG_GRKERNSEC_BRUTE
57878+ uid_t uid = 0;
57879+
57880+ if (!grsec_enable_brute)
57881+ return;
57882+
57883+ rcu_read_lock();
57884+ read_lock(&tasklist_lock);
57885+ read_lock(&grsec_exec_file_lock);
57886+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
57887+ p->real_parent->brute = 1;
57888+ else {
57889+ const struct cred *cred = __task_cred(p), *cred2;
57890+ struct task_struct *tsk, *tsk2;
57891+
57892+ if (!__get_dumpable(mm_flags) && cred->uid) {
57893+ struct user_struct *user;
57894+
57895+ uid = cred->uid;
57896+
57897+ /* this is put upon execution past expiration */
57898+ user = find_user(uid);
57899+ if (user == NULL)
57900+ goto unlock;
57901+ user->banned = 1;
57902+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
57903+ if (user->ban_expires == ~0UL)
57904+ user->ban_expires--;
57905+
57906+ do_each_thread(tsk2, tsk) {
57907+ cred2 = __task_cred(tsk);
57908+ if (tsk != p && cred2->uid == uid)
57909+ gr_fake_force_sig(SIGKILL, tsk);
57910+ } while_each_thread(tsk2, tsk);
57911+ }
57912+ }
57913+unlock:
57914+ read_unlock(&grsec_exec_file_lock);
57915+ read_unlock(&tasklist_lock);
57916+ rcu_read_unlock();
57917+
57918+ if (uid)
57919+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
57920+
57921+#endif
57922+ return;
57923+}
57924+
57925+void gr_handle_brute_check(void)
57926+{
57927+#ifdef CONFIG_GRKERNSEC_BRUTE
57928+ if (current->brute)
57929+ msleep(30 * 1000);
57930+#endif
57931+ return;
57932+}
57933+
57934+void gr_handle_kernel_exploit(void)
57935+{
57936+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
57937+ const struct cred *cred;
57938+ struct task_struct *tsk, *tsk2;
57939+ struct user_struct *user;
57940+ uid_t uid;
57941+
57942+ if (in_irq() || in_serving_softirq() || in_nmi())
57943+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
57944+
57945+ uid = current_uid();
57946+
57947+ if (uid == 0)
57948+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
57949+ else {
57950+ /* kill all the processes of this user, hold a reference
57951+ to their creds struct, and prevent them from creating
57952+ another process until system reset
57953+ */
57954+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
57955+ /* we intentionally leak this ref */
57956+ user = get_uid(current->cred->user);
57957+ if (user) {
57958+ user->banned = 1;
57959+ user->ban_expires = ~0UL;
57960+ }
57961+
57962+ read_lock(&tasklist_lock);
57963+ do_each_thread(tsk2, tsk) {
57964+ cred = __task_cred(tsk);
57965+ if (cred->uid == uid)
57966+ gr_fake_force_sig(SIGKILL, tsk);
57967+ } while_each_thread(tsk2, tsk);
57968+ read_unlock(&tasklist_lock);
57969+ }
57970+#endif
57971+}
57972+
57973+int __gr_process_user_ban(struct user_struct *user)
57974+{
57975+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57976+ if (unlikely(user->banned)) {
57977+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
57978+ user->banned = 0;
57979+ user->ban_expires = 0;
57980+ free_uid(user);
57981+ } else
57982+ return -EPERM;
57983+ }
57984+#endif
57985+ return 0;
57986+}
57987+
57988+int gr_process_user_ban(void)
57989+{
57990+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57991+ return __gr_process_user_ban(current->cred->user);
57992+#endif
57993+ return 0;
57994+}
57995diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
57996new file mode 100644
57997index 0000000..4030d57
57998--- /dev/null
57999+++ b/grsecurity/grsec_sock.c
58000@@ -0,0 +1,244 @@
58001+#include <linux/kernel.h>
58002+#include <linux/module.h>
58003+#include <linux/sched.h>
58004+#include <linux/file.h>
58005+#include <linux/net.h>
58006+#include <linux/in.h>
58007+#include <linux/ip.h>
58008+#include <net/sock.h>
58009+#include <net/inet_sock.h>
58010+#include <linux/grsecurity.h>
58011+#include <linux/grinternal.h>
58012+#include <linux/gracl.h>
58013+
58014+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
58015+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
58016+
58017+EXPORT_SYMBOL(gr_search_udp_recvmsg);
58018+EXPORT_SYMBOL(gr_search_udp_sendmsg);
58019+
58020+#ifdef CONFIG_UNIX_MODULE
58021+EXPORT_SYMBOL(gr_acl_handle_unix);
58022+EXPORT_SYMBOL(gr_acl_handle_mknod);
58023+EXPORT_SYMBOL(gr_handle_chroot_unix);
58024+EXPORT_SYMBOL(gr_handle_create);
58025+#endif
58026+
58027+#ifdef CONFIG_GRKERNSEC
58028+#define gr_conn_table_size 32749
58029+struct conn_table_entry {
58030+ struct conn_table_entry *next;
58031+ struct signal_struct *sig;
58032+};
58033+
58034+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
58035+DEFINE_SPINLOCK(gr_conn_table_lock);
58036+
58037+extern const char * gr_socktype_to_name(unsigned char type);
58038+extern const char * gr_proto_to_name(unsigned char proto);
58039+extern const char * gr_sockfamily_to_name(unsigned char family);
58040+
58041+static __inline__ int
58042+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
58043+{
58044+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
58045+}
58046+
58047+static __inline__ int
58048+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
58049+ __u16 sport, __u16 dport)
58050+{
58051+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
58052+ sig->gr_sport == sport && sig->gr_dport == dport))
58053+ return 1;
58054+ else
58055+ return 0;
58056+}
58057+
58058+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
58059+{
58060+ struct conn_table_entry **match;
58061+ unsigned int index;
58062+
58063+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58064+ sig->gr_sport, sig->gr_dport,
58065+ gr_conn_table_size);
58066+
58067+ newent->sig = sig;
58068+
58069+ match = &gr_conn_table[index];
58070+ newent->next = *match;
58071+ *match = newent;
58072+
58073+ return;
58074+}
58075+
58076+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
58077+{
58078+ struct conn_table_entry *match, *last = NULL;
58079+ unsigned int index;
58080+
58081+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58082+ sig->gr_sport, sig->gr_dport,
58083+ gr_conn_table_size);
58084+
58085+ match = gr_conn_table[index];
58086+ while (match && !conn_match(match->sig,
58087+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
58088+ sig->gr_dport)) {
58089+ last = match;
58090+ match = match->next;
58091+ }
58092+
58093+ if (match) {
58094+ if (last)
58095+ last->next = match->next;
58096+ else
58097+ gr_conn_table[index] = NULL;
58098+ kfree(match);
58099+ }
58100+
58101+ return;
58102+}
58103+
58104+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
58105+ __u16 sport, __u16 dport)
58106+{
58107+ struct conn_table_entry *match;
58108+ unsigned int index;
58109+
58110+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
58111+
58112+ match = gr_conn_table[index];
58113+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
58114+ match = match->next;
58115+
58116+ if (match)
58117+ return match->sig;
58118+ else
58119+ return NULL;
58120+}
58121+
58122+#endif
58123+
58124+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
58125+{
58126+#ifdef CONFIG_GRKERNSEC
58127+ struct signal_struct *sig = task->signal;
58128+ struct conn_table_entry *newent;
58129+
58130+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
58131+ if (newent == NULL)
58132+ return;
58133+ /* no bh lock needed since we are called with bh disabled */
58134+ spin_lock(&gr_conn_table_lock);
58135+ gr_del_task_from_ip_table_nolock(sig);
58136+ sig->gr_saddr = inet->inet_rcv_saddr;
58137+ sig->gr_daddr = inet->inet_daddr;
58138+ sig->gr_sport = inet->inet_sport;
58139+ sig->gr_dport = inet->inet_dport;
58140+ gr_add_to_task_ip_table_nolock(sig, newent);
58141+ spin_unlock(&gr_conn_table_lock);
58142+#endif
58143+ return;
58144+}
58145+
58146+void gr_del_task_from_ip_table(struct task_struct *task)
58147+{
58148+#ifdef CONFIG_GRKERNSEC
58149+ spin_lock_bh(&gr_conn_table_lock);
58150+ gr_del_task_from_ip_table_nolock(task->signal);
58151+ spin_unlock_bh(&gr_conn_table_lock);
58152+#endif
58153+ return;
58154+}
58155+
58156+void
58157+gr_attach_curr_ip(const struct sock *sk)
58158+{
58159+#ifdef CONFIG_GRKERNSEC
58160+ struct signal_struct *p, *set;
58161+ const struct inet_sock *inet = inet_sk(sk);
58162+
58163+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
58164+ return;
58165+
58166+ set = current->signal;
58167+
58168+ spin_lock_bh(&gr_conn_table_lock);
58169+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
58170+ inet->inet_dport, inet->inet_sport);
58171+ if (unlikely(p != NULL)) {
58172+ set->curr_ip = p->curr_ip;
58173+ set->used_accept = 1;
58174+ gr_del_task_from_ip_table_nolock(p);
58175+ spin_unlock_bh(&gr_conn_table_lock);
58176+ return;
58177+ }
58178+ spin_unlock_bh(&gr_conn_table_lock);
58179+
58180+ set->curr_ip = inet->inet_daddr;
58181+ set->used_accept = 1;
58182+#endif
58183+ return;
58184+}
58185+
58186+int
58187+gr_handle_sock_all(const int family, const int type, const int protocol)
58188+{
58189+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58190+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
58191+ (family != AF_UNIX)) {
58192+ if (family == AF_INET)
58193+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
58194+ else
58195+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
58196+ return -EACCES;
58197+ }
58198+#endif
58199+ return 0;
58200+}
58201+
58202+int
58203+gr_handle_sock_server(const struct sockaddr *sck)
58204+{
58205+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58206+ if (grsec_enable_socket_server &&
58207+ in_group_p(grsec_socket_server_gid) &&
58208+ sck && (sck->sa_family != AF_UNIX) &&
58209+ (sck->sa_family != AF_LOCAL)) {
58210+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58211+ return -EACCES;
58212+ }
58213+#endif
58214+ return 0;
58215+}
58216+
58217+int
58218+gr_handle_sock_server_other(const struct sock *sck)
58219+{
58220+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58221+ if (grsec_enable_socket_server &&
58222+ in_group_p(grsec_socket_server_gid) &&
58223+ sck && (sck->sk_family != AF_UNIX) &&
58224+ (sck->sk_family != AF_LOCAL)) {
58225+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58226+ return -EACCES;
58227+ }
58228+#endif
58229+ return 0;
58230+}
58231+
58232+int
58233+gr_handle_sock_client(const struct sockaddr *sck)
58234+{
58235+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58236+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
58237+ sck && (sck->sa_family != AF_UNIX) &&
58238+ (sck->sa_family != AF_LOCAL)) {
58239+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
58240+ return -EACCES;
58241+ }
58242+#endif
58243+ return 0;
58244+}
58245diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
58246new file mode 100644
58247index 0000000..8316f6f
58248--- /dev/null
58249+++ b/grsecurity/grsec_sysctl.c
58250@@ -0,0 +1,453 @@
58251+#include <linux/kernel.h>
58252+#include <linux/sched.h>
58253+#include <linux/sysctl.h>
58254+#include <linux/grsecurity.h>
58255+#include <linux/grinternal.h>
58256+
58257+int
58258+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
58259+{
58260+#ifdef CONFIG_GRKERNSEC_SYSCTL
58261+ if (dirname == NULL || name == NULL)
58262+ return 0;
58263+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
58264+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
58265+ return -EACCES;
58266+ }
58267+#endif
58268+ return 0;
58269+}
58270+
58271+#ifdef CONFIG_GRKERNSEC_ROFS
58272+static int __maybe_unused one = 1;
58273+#endif
58274+
58275+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
58276+struct ctl_table grsecurity_table[] = {
58277+#ifdef CONFIG_GRKERNSEC_SYSCTL
58278+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
58279+#ifdef CONFIG_GRKERNSEC_IO
58280+ {
58281+ .procname = "disable_priv_io",
58282+ .data = &grsec_disable_privio,
58283+ .maxlen = sizeof(int),
58284+ .mode = 0600,
58285+ .proc_handler = &proc_dointvec,
58286+ },
58287+#endif
58288+#endif
58289+#ifdef CONFIG_GRKERNSEC_LINK
58290+ {
58291+ .procname = "linking_restrictions",
58292+ .data = &grsec_enable_link,
58293+ .maxlen = sizeof(int),
58294+ .mode = 0600,
58295+ .proc_handler = &proc_dointvec,
58296+ },
58297+#endif
58298+#ifdef CONFIG_GRKERNSEC_BRUTE
58299+ {
58300+ .procname = "deter_bruteforce",
58301+ .data = &grsec_enable_brute,
58302+ .maxlen = sizeof(int),
58303+ .mode = 0600,
58304+ .proc_handler = &proc_dointvec,
58305+ },
58306+#endif
58307+#ifdef CONFIG_GRKERNSEC_FIFO
58308+ {
58309+ .procname = "fifo_restrictions",
58310+ .data = &grsec_enable_fifo,
58311+ .maxlen = sizeof(int),
58312+ .mode = 0600,
58313+ .proc_handler = &proc_dointvec,
58314+ },
58315+#endif
58316+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58317+ {
58318+ .procname = "ptrace_readexec",
58319+ .data = &grsec_enable_ptrace_readexec,
58320+ .maxlen = sizeof(int),
58321+ .mode = 0600,
58322+ .proc_handler = &proc_dointvec,
58323+ },
58324+#endif
58325+#ifdef CONFIG_GRKERNSEC_SETXID
58326+ {
58327+ .procname = "consistent_setxid",
58328+ .data = &grsec_enable_setxid,
58329+ .maxlen = sizeof(int),
58330+ .mode = 0600,
58331+ .proc_handler = &proc_dointvec,
58332+ },
58333+#endif
58334+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58335+ {
58336+ .procname = "ip_blackhole",
58337+ .data = &grsec_enable_blackhole,
58338+ .maxlen = sizeof(int),
58339+ .mode = 0600,
58340+ .proc_handler = &proc_dointvec,
58341+ },
58342+ {
58343+ .procname = "lastack_retries",
58344+ .data = &grsec_lastack_retries,
58345+ .maxlen = sizeof(int),
58346+ .mode = 0600,
58347+ .proc_handler = &proc_dointvec,
58348+ },
58349+#endif
58350+#ifdef CONFIG_GRKERNSEC_EXECLOG
58351+ {
58352+ .procname = "exec_logging",
58353+ .data = &grsec_enable_execlog,
58354+ .maxlen = sizeof(int),
58355+ .mode = 0600,
58356+ .proc_handler = &proc_dointvec,
58357+ },
58358+#endif
58359+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58360+ {
58361+ .procname = "rwxmap_logging",
58362+ .data = &grsec_enable_log_rwxmaps,
58363+ .maxlen = sizeof(int),
58364+ .mode = 0600,
58365+ .proc_handler = &proc_dointvec,
58366+ },
58367+#endif
58368+#ifdef CONFIG_GRKERNSEC_SIGNAL
58369+ {
58370+ .procname = "signal_logging",
58371+ .data = &grsec_enable_signal,
58372+ .maxlen = sizeof(int),
58373+ .mode = 0600,
58374+ .proc_handler = &proc_dointvec,
58375+ },
58376+#endif
58377+#ifdef CONFIG_GRKERNSEC_FORKFAIL
58378+ {
58379+ .procname = "forkfail_logging",
58380+ .data = &grsec_enable_forkfail,
58381+ .maxlen = sizeof(int),
58382+ .mode = 0600,
58383+ .proc_handler = &proc_dointvec,
58384+ },
58385+#endif
58386+#ifdef CONFIG_GRKERNSEC_TIME
58387+ {
58388+ .procname = "timechange_logging",
58389+ .data = &grsec_enable_time,
58390+ .maxlen = sizeof(int),
58391+ .mode = 0600,
58392+ .proc_handler = &proc_dointvec,
58393+ },
58394+#endif
58395+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58396+ {
58397+ .procname = "chroot_deny_shmat",
58398+ .data = &grsec_enable_chroot_shmat,
58399+ .maxlen = sizeof(int),
58400+ .mode = 0600,
58401+ .proc_handler = &proc_dointvec,
58402+ },
58403+#endif
58404+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58405+ {
58406+ .procname = "chroot_deny_unix",
58407+ .data = &grsec_enable_chroot_unix,
58408+ .maxlen = sizeof(int),
58409+ .mode = 0600,
58410+ .proc_handler = &proc_dointvec,
58411+ },
58412+#endif
58413+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58414+ {
58415+ .procname = "chroot_deny_mount",
58416+ .data = &grsec_enable_chroot_mount,
58417+ .maxlen = sizeof(int),
58418+ .mode = 0600,
58419+ .proc_handler = &proc_dointvec,
58420+ },
58421+#endif
58422+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58423+ {
58424+ .procname = "chroot_deny_fchdir",
58425+ .data = &grsec_enable_chroot_fchdir,
58426+ .maxlen = sizeof(int),
58427+ .mode = 0600,
58428+ .proc_handler = &proc_dointvec,
58429+ },
58430+#endif
58431+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58432+ {
58433+ .procname = "chroot_deny_chroot",
58434+ .data = &grsec_enable_chroot_double,
58435+ .maxlen = sizeof(int),
58436+ .mode = 0600,
58437+ .proc_handler = &proc_dointvec,
58438+ },
58439+#endif
58440+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58441+ {
58442+ .procname = "chroot_deny_pivot",
58443+ .data = &grsec_enable_chroot_pivot,
58444+ .maxlen = sizeof(int),
58445+ .mode = 0600,
58446+ .proc_handler = &proc_dointvec,
58447+ },
58448+#endif
58449+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
58450+ {
58451+ .procname = "chroot_enforce_chdir",
58452+ .data = &grsec_enable_chroot_chdir,
58453+ .maxlen = sizeof(int),
58454+ .mode = 0600,
58455+ .proc_handler = &proc_dointvec,
58456+ },
58457+#endif
58458+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58459+ {
58460+ .procname = "chroot_deny_chmod",
58461+ .data = &grsec_enable_chroot_chmod,
58462+ .maxlen = sizeof(int),
58463+ .mode = 0600,
58464+ .proc_handler = &proc_dointvec,
58465+ },
58466+#endif
58467+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
58468+ {
58469+ .procname = "chroot_deny_mknod",
58470+ .data = &grsec_enable_chroot_mknod,
58471+ .maxlen = sizeof(int),
58472+ .mode = 0600,
58473+ .proc_handler = &proc_dointvec,
58474+ },
58475+#endif
58476+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58477+ {
58478+ .procname = "chroot_restrict_nice",
58479+ .data = &grsec_enable_chroot_nice,
58480+ .maxlen = sizeof(int),
58481+ .mode = 0600,
58482+ .proc_handler = &proc_dointvec,
58483+ },
58484+#endif
58485+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
58486+ {
58487+ .procname = "chroot_execlog",
58488+ .data = &grsec_enable_chroot_execlog,
58489+ .maxlen = sizeof(int),
58490+ .mode = 0600,
58491+ .proc_handler = &proc_dointvec,
58492+ },
58493+#endif
58494+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58495+ {
58496+ .procname = "chroot_caps",
58497+ .data = &grsec_enable_chroot_caps,
58498+ .maxlen = sizeof(int),
58499+ .mode = 0600,
58500+ .proc_handler = &proc_dointvec,
58501+ },
58502+#endif
58503+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
58504+ {
58505+ .procname = "chroot_deny_sysctl",
58506+ .data = &grsec_enable_chroot_sysctl,
58507+ .maxlen = sizeof(int),
58508+ .mode = 0600,
58509+ .proc_handler = &proc_dointvec,
58510+ },
58511+#endif
58512+#ifdef CONFIG_GRKERNSEC_TPE
58513+ {
58514+ .procname = "tpe",
58515+ .data = &grsec_enable_tpe,
58516+ .maxlen = sizeof(int),
58517+ .mode = 0600,
58518+ .proc_handler = &proc_dointvec,
58519+ },
58520+ {
58521+ .procname = "tpe_gid",
58522+ .data = &grsec_tpe_gid,
58523+ .maxlen = sizeof(int),
58524+ .mode = 0600,
58525+ .proc_handler = &proc_dointvec,
58526+ },
58527+#endif
58528+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58529+ {
58530+ .procname = "tpe_invert",
58531+ .data = &grsec_enable_tpe_invert,
58532+ .maxlen = sizeof(int),
58533+ .mode = 0600,
58534+ .proc_handler = &proc_dointvec,
58535+ },
58536+#endif
58537+#ifdef CONFIG_GRKERNSEC_TPE_ALL
58538+ {
58539+ .procname = "tpe_restrict_all",
58540+ .data = &grsec_enable_tpe_all,
58541+ .maxlen = sizeof(int),
58542+ .mode = 0600,
58543+ .proc_handler = &proc_dointvec,
58544+ },
58545+#endif
58546+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58547+ {
58548+ .procname = "socket_all",
58549+ .data = &grsec_enable_socket_all,
58550+ .maxlen = sizeof(int),
58551+ .mode = 0600,
58552+ .proc_handler = &proc_dointvec,
58553+ },
58554+ {
58555+ .procname = "socket_all_gid",
58556+ .data = &grsec_socket_all_gid,
58557+ .maxlen = sizeof(int),
58558+ .mode = 0600,
58559+ .proc_handler = &proc_dointvec,
58560+ },
58561+#endif
58562+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58563+ {
58564+ .procname = "socket_client",
58565+ .data = &grsec_enable_socket_client,
58566+ .maxlen = sizeof(int),
58567+ .mode = 0600,
58568+ .proc_handler = &proc_dointvec,
58569+ },
58570+ {
58571+ .procname = "socket_client_gid",
58572+ .data = &grsec_socket_client_gid,
58573+ .maxlen = sizeof(int),
58574+ .mode = 0600,
58575+ .proc_handler = &proc_dointvec,
58576+ },
58577+#endif
58578+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58579+ {
58580+ .procname = "socket_server",
58581+ .data = &grsec_enable_socket_server,
58582+ .maxlen = sizeof(int),
58583+ .mode = 0600,
58584+ .proc_handler = &proc_dointvec,
58585+ },
58586+ {
58587+ .procname = "socket_server_gid",
58588+ .data = &grsec_socket_server_gid,
58589+ .maxlen = sizeof(int),
58590+ .mode = 0600,
58591+ .proc_handler = &proc_dointvec,
58592+ },
58593+#endif
58594+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
58595+ {
58596+ .procname = "audit_group",
58597+ .data = &grsec_enable_group,
58598+ .maxlen = sizeof(int),
58599+ .mode = 0600,
58600+ .proc_handler = &proc_dointvec,
58601+ },
58602+ {
58603+ .procname = "audit_gid",
58604+ .data = &grsec_audit_gid,
58605+ .maxlen = sizeof(int),
58606+ .mode = 0600,
58607+ .proc_handler = &proc_dointvec,
58608+ },
58609+#endif
58610+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
58611+ {
58612+ .procname = "audit_chdir",
58613+ .data = &grsec_enable_chdir,
58614+ .maxlen = sizeof(int),
58615+ .mode = 0600,
58616+ .proc_handler = &proc_dointvec,
58617+ },
58618+#endif
58619+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58620+ {
58621+ .procname = "audit_mount",
58622+ .data = &grsec_enable_mount,
58623+ .maxlen = sizeof(int),
58624+ .mode = 0600,
58625+ .proc_handler = &proc_dointvec,
58626+ },
58627+#endif
58628+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58629+ {
58630+ .procname = "audit_textrel",
58631+ .data = &grsec_enable_audit_textrel,
58632+ .maxlen = sizeof(int),
58633+ .mode = 0600,
58634+ .proc_handler = &proc_dointvec,
58635+ },
58636+#endif
58637+#ifdef CONFIG_GRKERNSEC_DMESG
58638+ {
58639+ .procname = "dmesg",
58640+ .data = &grsec_enable_dmesg,
58641+ .maxlen = sizeof(int),
58642+ .mode = 0600,
58643+ .proc_handler = &proc_dointvec,
58644+ },
58645+#endif
58646+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58647+ {
58648+ .procname = "chroot_findtask",
58649+ .data = &grsec_enable_chroot_findtask,
58650+ .maxlen = sizeof(int),
58651+ .mode = 0600,
58652+ .proc_handler = &proc_dointvec,
58653+ },
58654+#endif
58655+#ifdef CONFIG_GRKERNSEC_RESLOG
58656+ {
58657+ .procname = "resource_logging",
58658+ .data = &grsec_resource_logging,
58659+ .maxlen = sizeof(int),
58660+ .mode = 0600,
58661+ .proc_handler = &proc_dointvec,
58662+ },
58663+#endif
58664+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58665+ {
58666+ .procname = "audit_ptrace",
58667+ .data = &grsec_enable_audit_ptrace,
58668+ .maxlen = sizeof(int),
58669+ .mode = 0600,
58670+ .proc_handler = &proc_dointvec,
58671+ },
58672+#endif
58673+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
58674+ {
58675+ .procname = "harden_ptrace",
58676+ .data = &grsec_enable_harden_ptrace,
58677+ .maxlen = sizeof(int),
58678+ .mode = 0600,
58679+ .proc_handler = &proc_dointvec,
58680+ },
58681+#endif
58682+ {
58683+ .procname = "grsec_lock",
58684+ .data = &grsec_lock,
58685+ .maxlen = sizeof(int),
58686+ .mode = 0600,
58687+ .proc_handler = &proc_dointvec,
58688+ },
58689+#endif
58690+#ifdef CONFIG_GRKERNSEC_ROFS
58691+ {
58692+ .procname = "romount_protect",
58693+ .data = &grsec_enable_rofs,
58694+ .maxlen = sizeof(int),
58695+ .mode = 0600,
58696+ .proc_handler = &proc_dointvec_minmax,
58697+ .extra1 = &one,
58698+ .extra2 = &one,
58699+ },
58700+#endif
58701+ { }
58702+};
58703+#endif
58704diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
58705new file mode 100644
58706index 0000000..0dc13c3
58707--- /dev/null
58708+++ b/grsecurity/grsec_time.c
58709@@ -0,0 +1,16 @@
58710+#include <linux/kernel.h>
58711+#include <linux/sched.h>
58712+#include <linux/grinternal.h>
58713+#include <linux/module.h>
58714+
58715+void
58716+gr_log_timechange(void)
58717+{
58718+#ifdef CONFIG_GRKERNSEC_TIME
58719+ if (grsec_enable_time)
58720+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
58721+#endif
58722+ return;
58723+}
58724+
58725+EXPORT_SYMBOL(gr_log_timechange);
58726diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
58727new file mode 100644
58728index 0000000..07e0dc0
58729--- /dev/null
58730+++ b/grsecurity/grsec_tpe.c
58731@@ -0,0 +1,73 @@
58732+#include <linux/kernel.h>
58733+#include <linux/sched.h>
58734+#include <linux/file.h>
58735+#include <linux/fs.h>
58736+#include <linux/grinternal.h>
58737+
58738+extern int gr_acl_tpe_check(void);
58739+
58740+int
58741+gr_tpe_allow(const struct file *file)
58742+{
58743+#ifdef CONFIG_GRKERNSEC
58744+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
58745+ const struct cred *cred = current_cred();
58746+ char *msg = NULL;
58747+ char *msg2 = NULL;
58748+
58749+ // never restrict root
58750+ if (!cred->uid)
58751+ return 1;
58752+
58753+ if (grsec_enable_tpe) {
58754+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58755+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
58756+ msg = "not being in trusted group";
58757+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
58758+ msg = "being in untrusted group";
58759+#else
58760+ if (in_group_p(grsec_tpe_gid))
58761+ msg = "being in untrusted group";
58762+#endif
58763+ }
58764+ if (!msg && gr_acl_tpe_check())
58765+ msg = "being in untrusted role";
58766+
58767+ // not in any affected group/role
58768+ if (!msg)
58769+ goto next_check;
58770+
58771+ if (inode->i_uid)
58772+ msg2 = "file in non-root-owned directory";
58773+ else if (inode->i_mode & S_IWOTH)
58774+ msg2 = "file in world-writable directory";
58775+ else if (inode->i_mode & S_IWGRP)
58776+ msg2 = "file in group-writable directory";
58777+
58778+ if (msg && msg2) {
58779+ char fullmsg[70] = {0};
58780+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
58781+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
58782+ return 0;
58783+ }
58784+ msg = NULL;
58785+next_check:
58786+#ifdef CONFIG_GRKERNSEC_TPE_ALL
58787+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
58788+ return 1;
58789+
58790+ if (inode->i_uid && (inode->i_uid != cred->uid))
58791+ msg = "directory not owned by user";
58792+ else if (inode->i_mode & S_IWOTH)
58793+ msg = "file in world-writable directory";
58794+ else if (inode->i_mode & S_IWGRP)
58795+ msg = "file in group-writable directory";
58796+
58797+ if (msg) {
58798+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
58799+ return 0;
58800+ }
58801+#endif
58802+#endif
58803+ return 1;
58804+}
58805diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
58806new file mode 100644
58807index 0000000..9f7b1ac
58808--- /dev/null
58809+++ b/grsecurity/grsum.c
58810@@ -0,0 +1,61 @@
58811+#include <linux/err.h>
58812+#include <linux/kernel.h>
58813+#include <linux/sched.h>
58814+#include <linux/mm.h>
58815+#include <linux/scatterlist.h>
58816+#include <linux/crypto.h>
58817+#include <linux/gracl.h>
58818+
58819+
58820+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
58821+#error "crypto and sha256 must be built into the kernel"
58822+#endif
58823+
58824+int
58825+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
58826+{
58827+ char *p;
58828+ struct crypto_hash *tfm;
58829+ struct hash_desc desc;
58830+ struct scatterlist sg;
58831+ unsigned char temp_sum[GR_SHA_LEN];
58832+ volatile int retval = 0;
58833+ volatile int dummy = 0;
58834+ unsigned int i;
58835+
58836+ sg_init_table(&sg, 1);
58837+
58838+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
58839+ if (IS_ERR(tfm)) {
58840+ /* should never happen, since sha256 should be built in */
58841+ return 1;
58842+ }
58843+
58844+ desc.tfm = tfm;
58845+ desc.flags = 0;
58846+
58847+ crypto_hash_init(&desc);
58848+
58849+ p = salt;
58850+ sg_set_buf(&sg, p, GR_SALT_LEN);
58851+ crypto_hash_update(&desc, &sg, sg.length);
58852+
58853+ p = entry->pw;
58854+ sg_set_buf(&sg, p, strlen(p));
58855+
58856+ crypto_hash_update(&desc, &sg, sg.length);
58857+
58858+ crypto_hash_final(&desc, temp_sum);
58859+
58860+ memset(entry->pw, 0, GR_PW_LEN);
58861+
58862+ for (i = 0; i < GR_SHA_LEN; i++)
58863+ if (sum[i] != temp_sum[i])
58864+ retval = 1;
58865+ else
58866+ dummy = 1; // waste a cycle
58867+
58868+ crypto_free_hash(tfm);
58869+
58870+ return retval;
58871+}
58872diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
58873index f1c8ca6..b5c1cc7 100644
58874--- a/include/acpi/acpi_bus.h
58875+++ b/include/acpi/acpi_bus.h
58876@@ -107,7 +107,7 @@ struct acpi_device_ops {
58877 acpi_op_bind bind;
58878 acpi_op_unbind unbind;
58879 acpi_op_notify notify;
58880-};
58881+} __no_const;
58882
58883 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
58884
58885diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
58886index b7babf0..71e4e74 100644
58887--- a/include/asm-generic/atomic-long.h
58888+++ b/include/asm-generic/atomic-long.h
58889@@ -22,6 +22,12 @@
58890
58891 typedef atomic64_t atomic_long_t;
58892
58893+#ifdef CONFIG_PAX_REFCOUNT
58894+typedef atomic64_unchecked_t atomic_long_unchecked_t;
58895+#else
58896+typedef atomic64_t atomic_long_unchecked_t;
58897+#endif
58898+
58899 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
58900
58901 static inline long atomic_long_read(atomic_long_t *l)
58902@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
58903 return (long)atomic64_read(v);
58904 }
58905
58906+#ifdef CONFIG_PAX_REFCOUNT
58907+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
58908+{
58909+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58910+
58911+ return (long)atomic64_read_unchecked(v);
58912+}
58913+#endif
58914+
58915 static inline void atomic_long_set(atomic_long_t *l, long i)
58916 {
58917 atomic64_t *v = (atomic64_t *)l;
58918@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
58919 atomic64_set(v, i);
58920 }
58921
58922+#ifdef CONFIG_PAX_REFCOUNT
58923+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
58924+{
58925+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58926+
58927+ atomic64_set_unchecked(v, i);
58928+}
58929+#endif
58930+
58931 static inline void atomic_long_inc(atomic_long_t *l)
58932 {
58933 atomic64_t *v = (atomic64_t *)l;
58934@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
58935 atomic64_inc(v);
58936 }
58937
58938+#ifdef CONFIG_PAX_REFCOUNT
58939+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
58940+{
58941+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58942+
58943+ atomic64_inc_unchecked(v);
58944+}
58945+#endif
58946+
58947 static inline void atomic_long_dec(atomic_long_t *l)
58948 {
58949 atomic64_t *v = (atomic64_t *)l;
58950@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
58951 atomic64_dec(v);
58952 }
58953
58954+#ifdef CONFIG_PAX_REFCOUNT
58955+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
58956+{
58957+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58958+
58959+ atomic64_dec_unchecked(v);
58960+}
58961+#endif
58962+
58963 static inline void atomic_long_add(long i, atomic_long_t *l)
58964 {
58965 atomic64_t *v = (atomic64_t *)l;
58966@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
58967 atomic64_add(i, v);
58968 }
58969
58970+#ifdef CONFIG_PAX_REFCOUNT
58971+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
58972+{
58973+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58974+
58975+ atomic64_add_unchecked(i, v);
58976+}
58977+#endif
58978+
58979 static inline void atomic_long_sub(long i, atomic_long_t *l)
58980 {
58981 atomic64_t *v = (atomic64_t *)l;
58982@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
58983 atomic64_sub(i, v);
58984 }
58985
58986+#ifdef CONFIG_PAX_REFCOUNT
58987+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
58988+{
58989+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58990+
58991+ atomic64_sub_unchecked(i, v);
58992+}
58993+#endif
58994+
58995 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
58996 {
58997 atomic64_t *v = (atomic64_t *)l;
58998@@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
58999 return (long)atomic64_inc_return(v);
59000 }
59001
59002+#ifdef CONFIG_PAX_REFCOUNT
59003+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59004+{
59005+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59006+
59007+ return (long)atomic64_inc_return_unchecked(v);
59008+}
59009+#endif
59010+
59011 static inline long atomic_long_dec_return(atomic_long_t *l)
59012 {
59013 atomic64_t *v = (atomic64_t *)l;
59014@@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59015
59016 typedef atomic_t atomic_long_t;
59017
59018+#ifdef CONFIG_PAX_REFCOUNT
59019+typedef atomic_unchecked_t atomic_long_unchecked_t;
59020+#else
59021+typedef atomic_t atomic_long_unchecked_t;
59022+#endif
59023+
59024 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
59025 static inline long atomic_long_read(atomic_long_t *l)
59026 {
59027@@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59028 return (long)atomic_read(v);
59029 }
59030
59031+#ifdef CONFIG_PAX_REFCOUNT
59032+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59033+{
59034+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59035+
59036+ return (long)atomic_read_unchecked(v);
59037+}
59038+#endif
59039+
59040 static inline void atomic_long_set(atomic_long_t *l, long i)
59041 {
59042 atomic_t *v = (atomic_t *)l;
59043@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59044 atomic_set(v, i);
59045 }
59046
59047+#ifdef CONFIG_PAX_REFCOUNT
59048+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59049+{
59050+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59051+
59052+ atomic_set_unchecked(v, i);
59053+}
59054+#endif
59055+
59056 static inline void atomic_long_inc(atomic_long_t *l)
59057 {
59058 atomic_t *v = (atomic_t *)l;
59059@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59060 atomic_inc(v);
59061 }
59062
59063+#ifdef CONFIG_PAX_REFCOUNT
59064+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59065+{
59066+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59067+
59068+ atomic_inc_unchecked(v);
59069+}
59070+#endif
59071+
59072 static inline void atomic_long_dec(atomic_long_t *l)
59073 {
59074 atomic_t *v = (atomic_t *)l;
59075@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59076 atomic_dec(v);
59077 }
59078
59079+#ifdef CONFIG_PAX_REFCOUNT
59080+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59081+{
59082+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59083+
59084+ atomic_dec_unchecked(v);
59085+}
59086+#endif
59087+
59088 static inline void atomic_long_add(long i, atomic_long_t *l)
59089 {
59090 atomic_t *v = (atomic_t *)l;
59091@@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59092 atomic_add(i, v);
59093 }
59094
59095+#ifdef CONFIG_PAX_REFCOUNT
59096+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59097+{
59098+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59099+
59100+ atomic_add_unchecked(i, v);
59101+}
59102+#endif
59103+
59104 static inline void atomic_long_sub(long i, atomic_long_t *l)
59105 {
59106 atomic_t *v = (atomic_t *)l;
59107@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59108 atomic_sub(i, v);
59109 }
59110
59111+#ifdef CONFIG_PAX_REFCOUNT
59112+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59113+{
59114+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59115+
59116+ atomic_sub_unchecked(i, v);
59117+}
59118+#endif
59119+
59120 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59121 {
59122 atomic_t *v = (atomic_t *)l;
59123@@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59124 return (long)atomic_inc_return(v);
59125 }
59126
59127+#ifdef CONFIG_PAX_REFCOUNT
59128+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59129+{
59130+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59131+
59132+ return (long)atomic_inc_return_unchecked(v);
59133+}
59134+#endif
59135+
59136 static inline long atomic_long_dec_return(atomic_long_t *l)
59137 {
59138 atomic_t *v = (atomic_t *)l;
59139@@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59140
59141 #endif /* BITS_PER_LONG == 64 */
59142
59143+#ifdef CONFIG_PAX_REFCOUNT
59144+static inline void pax_refcount_needs_these_functions(void)
59145+{
59146+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
59147+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
59148+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
59149+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
59150+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
59151+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
59152+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
59153+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
59154+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
59155+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
59156+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
59157+
59158+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
59159+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
59160+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
59161+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
59162+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
59163+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
59164+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
59165+}
59166+#else
59167+#define atomic_read_unchecked(v) atomic_read(v)
59168+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
59169+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
59170+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
59171+#define atomic_inc_unchecked(v) atomic_inc(v)
59172+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
59173+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
59174+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
59175+#define atomic_dec_unchecked(v) atomic_dec(v)
59176+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
59177+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
59178+
59179+#define atomic_long_read_unchecked(v) atomic_long_read(v)
59180+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
59181+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
59182+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
59183+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
59184+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
59185+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
59186+#endif
59187+
59188 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
59189diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
59190index b18ce4f..2ee2843 100644
59191--- a/include/asm-generic/atomic64.h
59192+++ b/include/asm-generic/atomic64.h
59193@@ -16,6 +16,8 @@ typedef struct {
59194 long long counter;
59195 } atomic64_t;
59196
59197+typedef atomic64_t atomic64_unchecked_t;
59198+
59199 #define ATOMIC64_INIT(i) { (i) }
59200
59201 extern long long atomic64_read(const atomic64_t *v);
59202@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
59203 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
59204 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
59205
59206+#define atomic64_read_unchecked(v) atomic64_read(v)
59207+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
59208+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
59209+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
59210+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
59211+#define atomic64_inc_unchecked(v) atomic64_inc(v)
59212+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
59213+#define atomic64_dec_unchecked(v) atomic64_dec(v)
59214+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
59215+
59216 #endif /* _ASM_GENERIC_ATOMIC64_H */
59217diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
59218index 1bfcfe5..e04c5c9 100644
59219--- a/include/asm-generic/cache.h
59220+++ b/include/asm-generic/cache.h
59221@@ -6,7 +6,7 @@
59222 * cache lines need to provide their own cache.h.
59223 */
59224
59225-#define L1_CACHE_SHIFT 5
59226-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
59227+#define L1_CACHE_SHIFT 5UL
59228+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
59229
59230 #endif /* __ASM_GENERIC_CACHE_H */
59231diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
59232index 0d68a1e..b74a761 100644
59233--- a/include/asm-generic/emergency-restart.h
59234+++ b/include/asm-generic/emergency-restart.h
59235@@ -1,7 +1,7 @@
59236 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
59237 #define _ASM_GENERIC_EMERGENCY_RESTART_H
59238
59239-static inline void machine_emergency_restart(void)
59240+static inline __noreturn void machine_emergency_restart(void)
59241 {
59242 machine_restart(NULL);
59243 }
59244diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
59245index 0232ccb..13d9165 100644
59246--- a/include/asm-generic/kmap_types.h
59247+++ b/include/asm-generic/kmap_types.h
59248@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
59249 KMAP_D(17) KM_NMI,
59250 KMAP_D(18) KM_NMI_PTE,
59251 KMAP_D(19) KM_KDB,
59252+KMAP_D(20) KM_CLEARPAGE,
59253 /*
59254 * Remember to update debug_kmap_atomic() when adding new kmap types!
59255 */
59256-KMAP_D(20) KM_TYPE_NR
59257+KMAP_D(21) KM_TYPE_NR
59258 };
59259
59260 #undef KMAP_D
59261diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
59262index 9ceb03b..2efbcbd 100644
59263--- a/include/asm-generic/local.h
59264+++ b/include/asm-generic/local.h
59265@@ -39,6 +39,7 @@ typedef struct
59266 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
59267 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
59268 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
59269+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
59270
59271 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
59272 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
59273diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
59274index 725612b..9cc513a 100644
59275--- a/include/asm-generic/pgtable-nopmd.h
59276+++ b/include/asm-generic/pgtable-nopmd.h
59277@@ -1,14 +1,19 @@
59278 #ifndef _PGTABLE_NOPMD_H
59279 #define _PGTABLE_NOPMD_H
59280
59281-#ifndef __ASSEMBLY__
59282-
59283 #include <asm-generic/pgtable-nopud.h>
59284
59285-struct mm_struct;
59286-
59287 #define __PAGETABLE_PMD_FOLDED
59288
59289+#define PMD_SHIFT PUD_SHIFT
59290+#define PTRS_PER_PMD 1
59291+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
59292+#define PMD_MASK (~(PMD_SIZE-1))
59293+
59294+#ifndef __ASSEMBLY__
59295+
59296+struct mm_struct;
59297+
59298 /*
59299 * Having the pmd type consist of a pud gets the size right, and allows
59300 * us to conceptually access the pud entry that this pmd is folded into
59301@@ -16,11 +21,6 @@ struct mm_struct;
59302 */
59303 typedef struct { pud_t pud; } pmd_t;
59304
59305-#define PMD_SHIFT PUD_SHIFT
59306-#define PTRS_PER_PMD 1
59307-#define PMD_SIZE (1UL << PMD_SHIFT)
59308-#define PMD_MASK (~(PMD_SIZE-1))
59309-
59310 /*
59311 * The "pud_xxx()" functions here are trivial for a folded two-level
59312 * setup: the pmd is never bad, and a pmd always exists (as it's folded
59313diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
59314index 810431d..0ec4804f 100644
59315--- a/include/asm-generic/pgtable-nopud.h
59316+++ b/include/asm-generic/pgtable-nopud.h
59317@@ -1,10 +1,15 @@
59318 #ifndef _PGTABLE_NOPUD_H
59319 #define _PGTABLE_NOPUD_H
59320
59321-#ifndef __ASSEMBLY__
59322-
59323 #define __PAGETABLE_PUD_FOLDED
59324
59325+#define PUD_SHIFT PGDIR_SHIFT
59326+#define PTRS_PER_PUD 1
59327+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
59328+#define PUD_MASK (~(PUD_SIZE-1))
59329+
59330+#ifndef __ASSEMBLY__
59331+
59332 /*
59333 * Having the pud type consist of a pgd gets the size right, and allows
59334 * us to conceptually access the pgd entry that this pud is folded into
59335@@ -12,11 +17,6 @@
59336 */
59337 typedef struct { pgd_t pgd; } pud_t;
59338
59339-#define PUD_SHIFT PGDIR_SHIFT
59340-#define PTRS_PER_PUD 1
59341-#define PUD_SIZE (1UL << PUD_SHIFT)
59342-#define PUD_MASK (~(PUD_SIZE-1))
59343-
59344 /*
59345 * The "pgd_xxx()" functions here are trivial for a folded two-level
59346 * setup: the pud is never bad, and a pud always exists (as it's folded
59347@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
59348 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
59349
59350 #define pgd_populate(mm, pgd, pud) do { } while (0)
59351+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
59352 /*
59353 * (puds are folded into pgds so this doesn't get actually called,
59354 * but the define is needed for a generic inline function.)
59355diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
59356index 125c54e..e95c18e 100644
59357--- a/include/asm-generic/pgtable.h
59358+++ b/include/asm-generic/pgtable.h
59359@@ -446,6 +446,18 @@ static inline int pmd_write(pmd_t pmd)
59360 #endif /* __HAVE_ARCH_PMD_WRITE */
59361 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
59362
59363+#ifndef __HAVE_ARCH_READ_PMD_ATOMIC
59364+static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
59365+{
59366+ /*
59367+ * Depend on compiler for an atomic pmd read. NOTE: this is
59368+ * only going to work, if the pmdval_t isn't larger than
59369+ * an unsigned long.
59370+ */
59371+ return *pmdp;
59372+}
59373+#endif /* __HAVE_ARCH_READ_PMD_ATOMIC */
59374+
59375 /*
59376 * This function is meant to be used by sites walking pagetables with
59377 * the mmap_sem hold in read mode to protect against MADV_DONTNEED and
59378@@ -459,11 +471,17 @@ static inline int pmd_write(pmd_t pmd)
59379 * undefined so behaving like if the pmd was none is safe (because it
59380 * can return none anyway). The compiler level barrier() is critically
59381 * important to compute the two checks atomically on the same pmdval.
59382+ *
59383+ * For 32bit kernels with a 64bit large pmd_t this automatically takes
59384+ * care of reading the pmd atomically to avoid SMP race conditions
59385+ * against pmd_populate() when the mmap_sem is hold for reading by the
59386+ * caller (a special atomic read not done by "gcc" as in the generic
59387+ * version above, is also needed when THP is disabled because the page
59388+ * fault can populate the pmd from under us).
59389 */
59390 static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
59391 {
59392- /* depend on compiler for an atomic pmd read */
59393- pmd_t pmdval = *pmd;
59394+ pmd_t pmdval = read_pmd_atomic(pmd);
59395 /*
59396 * The barrier will stabilize the pmdval in a register or on
59397 * the stack so that it will stop changing under the code.
59398@@ -503,6 +521,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
59399 #endif
59400 }
59401
59402+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
59403+static inline unsigned long pax_open_kernel(void) { return 0; }
59404+#endif
59405+
59406+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
59407+static inline unsigned long pax_close_kernel(void) { return 0; }
59408+#endif
59409+
59410 #endif /* CONFIG_MMU */
59411
59412 #endif /* !__ASSEMBLY__ */
59413diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
59414index 8aeadf6..f1dc019 100644
59415--- a/include/asm-generic/vmlinux.lds.h
59416+++ b/include/asm-generic/vmlinux.lds.h
59417@@ -218,6 +218,7 @@
59418 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
59419 VMLINUX_SYMBOL(__start_rodata) = .; \
59420 *(.rodata) *(.rodata.*) \
59421+ *(.data..read_only) \
59422 *(__vermagic) /* Kernel version magic */ \
59423 . = ALIGN(8); \
59424 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
59425@@ -716,17 +717,18 @@
59426 * section in the linker script will go there too. @phdr should have
59427 * a leading colon.
59428 *
59429- * Note that this macros defines __per_cpu_load as an absolute symbol.
59430+ * Note that this macros defines per_cpu_load as an absolute symbol.
59431 * If there is no need to put the percpu section at a predetermined
59432 * address, use PERCPU_SECTION.
59433 */
59434 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
59435- VMLINUX_SYMBOL(__per_cpu_load) = .; \
59436- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
59437+ per_cpu_load = .; \
59438+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
59439 - LOAD_OFFSET) { \
59440+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
59441 PERCPU_INPUT(cacheline) \
59442 } phdr \
59443- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
59444+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
59445
59446 /**
59447 * PERCPU_SECTION - define output section for percpu area, simple version
59448diff --git a/include/drm/drmP.h b/include/drm/drmP.h
59449index dd73104..fde86bd 100644
59450--- a/include/drm/drmP.h
59451+++ b/include/drm/drmP.h
59452@@ -72,6 +72,7 @@
59453 #include <linux/workqueue.h>
59454 #include <linux/poll.h>
59455 #include <asm/pgalloc.h>
59456+#include <asm/local.h>
59457 #include "drm.h"
59458
59459 #include <linux/idr.h>
59460@@ -1074,7 +1075,7 @@ struct drm_device {
59461
59462 /** \name Usage Counters */
59463 /*@{ */
59464- int open_count; /**< Outstanding files open */
59465+ local_t open_count; /**< Outstanding files open */
59466 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
59467 atomic_t vma_count; /**< Outstanding vma areas open */
59468 int buf_use; /**< Buffers in use -- cannot alloc */
59469@@ -1085,7 +1086,7 @@ struct drm_device {
59470 /*@{ */
59471 unsigned long counters;
59472 enum drm_stat_type types[15];
59473- atomic_t counts[15];
59474+ atomic_unchecked_t counts[15];
59475 /*@} */
59476
59477 struct list_head filelist;
59478diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
59479index 37515d1..34fa8b0 100644
59480--- a/include/drm/drm_crtc_helper.h
59481+++ b/include/drm/drm_crtc_helper.h
59482@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
59483
59484 /* disable crtc when not in use - more explicit than dpms off */
59485 void (*disable)(struct drm_crtc *crtc);
59486-};
59487+} __no_const;
59488
59489 struct drm_encoder_helper_funcs {
59490 void (*dpms)(struct drm_encoder *encoder, int mode);
59491@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
59492 struct drm_connector *connector);
59493 /* disable encoder when not in use - more explicit than dpms off */
59494 void (*disable)(struct drm_encoder *encoder);
59495-};
59496+} __no_const;
59497
59498 struct drm_connector_helper_funcs {
59499 int (*get_modes)(struct drm_connector *connector);
59500diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
59501index d6d1da4..fdd1ac5 100644
59502--- a/include/drm/ttm/ttm_memory.h
59503+++ b/include/drm/ttm/ttm_memory.h
59504@@ -48,7 +48,7 @@
59505
59506 struct ttm_mem_shrink {
59507 int (*do_shrink) (struct ttm_mem_shrink *);
59508-};
59509+} __no_const;
59510
59511 /**
59512 * struct ttm_mem_global - Global memory accounting structure.
59513diff --git a/include/linux/a.out.h b/include/linux/a.out.h
59514index e86dfca..40cc55f 100644
59515--- a/include/linux/a.out.h
59516+++ b/include/linux/a.out.h
59517@@ -39,6 +39,14 @@ enum machine_type {
59518 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
59519 };
59520
59521+/* Constants for the N_FLAGS field */
59522+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
59523+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
59524+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
59525+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
59526+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
59527+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
59528+
59529 #if !defined (N_MAGIC)
59530 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
59531 #endif
59532diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
59533index 06fd4bb..1caec0d 100644
59534--- a/include/linux/atmdev.h
59535+++ b/include/linux/atmdev.h
59536@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
59537 #endif
59538
59539 struct k_atm_aal_stats {
59540-#define __HANDLE_ITEM(i) atomic_t i
59541+#define __HANDLE_ITEM(i) atomic_unchecked_t i
59542 __AAL_STAT_ITEMS
59543 #undef __HANDLE_ITEM
59544 };
59545diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
59546index 366422b..1fa7f84 100644
59547--- a/include/linux/binfmts.h
59548+++ b/include/linux/binfmts.h
59549@@ -89,6 +89,7 @@ struct linux_binfmt {
59550 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
59551 int (*load_shlib)(struct file *);
59552 int (*core_dump)(struct coredump_params *cprm);
59553+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
59554 unsigned long min_coredump; /* minimal dump size */
59555 };
59556
59557diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
59558index 4d4ac24..2c3ccce 100644
59559--- a/include/linux/blkdev.h
59560+++ b/include/linux/blkdev.h
59561@@ -1376,7 +1376,7 @@ struct block_device_operations {
59562 /* this callback is with swap_lock and sometimes page table lock held */
59563 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
59564 struct module *owner;
59565-};
59566+} __do_const;
59567
59568 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
59569 unsigned long);
59570diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
59571index 4d1a074..88f929a 100644
59572--- a/include/linux/blktrace_api.h
59573+++ b/include/linux/blktrace_api.h
59574@@ -162,7 +162,7 @@ struct blk_trace {
59575 struct dentry *dir;
59576 struct dentry *dropped_file;
59577 struct dentry *msg_file;
59578- atomic_t dropped;
59579+ atomic_unchecked_t dropped;
59580 };
59581
59582 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
59583diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
59584index 83195fb..0b0f77d 100644
59585--- a/include/linux/byteorder/little_endian.h
59586+++ b/include/linux/byteorder/little_endian.h
59587@@ -42,51 +42,51 @@
59588
59589 static inline __le64 __cpu_to_le64p(const __u64 *p)
59590 {
59591- return (__force __le64)*p;
59592+ return (__force const __le64)*p;
59593 }
59594 static inline __u64 __le64_to_cpup(const __le64 *p)
59595 {
59596- return (__force __u64)*p;
59597+ return (__force const __u64)*p;
59598 }
59599 static inline __le32 __cpu_to_le32p(const __u32 *p)
59600 {
59601- return (__force __le32)*p;
59602+ return (__force const __le32)*p;
59603 }
59604 static inline __u32 __le32_to_cpup(const __le32 *p)
59605 {
59606- return (__force __u32)*p;
59607+ return (__force const __u32)*p;
59608 }
59609 static inline __le16 __cpu_to_le16p(const __u16 *p)
59610 {
59611- return (__force __le16)*p;
59612+ return (__force const __le16)*p;
59613 }
59614 static inline __u16 __le16_to_cpup(const __le16 *p)
59615 {
59616- return (__force __u16)*p;
59617+ return (__force const __u16)*p;
59618 }
59619 static inline __be64 __cpu_to_be64p(const __u64 *p)
59620 {
59621- return (__force __be64)__swab64p(p);
59622+ return (__force const __be64)__swab64p(p);
59623 }
59624 static inline __u64 __be64_to_cpup(const __be64 *p)
59625 {
59626- return __swab64p((__u64 *)p);
59627+ return __swab64p((const __u64 *)p);
59628 }
59629 static inline __be32 __cpu_to_be32p(const __u32 *p)
59630 {
59631- return (__force __be32)__swab32p(p);
59632+ return (__force const __be32)__swab32p(p);
59633 }
59634 static inline __u32 __be32_to_cpup(const __be32 *p)
59635 {
59636- return __swab32p((__u32 *)p);
59637+ return __swab32p((const __u32 *)p);
59638 }
59639 static inline __be16 __cpu_to_be16p(const __u16 *p)
59640 {
59641- return (__force __be16)__swab16p(p);
59642+ return (__force const __be16)__swab16p(p);
59643 }
59644 static inline __u16 __be16_to_cpup(const __be16 *p)
59645 {
59646- return __swab16p((__u16 *)p);
59647+ return __swab16p((const __u16 *)p);
59648 }
59649 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
59650 #define __le64_to_cpus(x) do { (void)(x); } while (0)
59651diff --git a/include/linux/cache.h b/include/linux/cache.h
59652index 4c57065..4307975 100644
59653--- a/include/linux/cache.h
59654+++ b/include/linux/cache.h
59655@@ -16,6 +16,10 @@
59656 #define __read_mostly
59657 #endif
59658
59659+#ifndef __read_only
59660+#define __read_only __read_mostly
59661+#endif
59662+
59663 #ifndef ____cacheline_aligned
59664 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
59665 #endif
59666diff --git a/include/linux/capability.h b/include/linux/capability.h
59667index 12d52de..b5f7fa7 100644
59668--- a/include/linux/capability.h
59669+++ b/include/linux/capability.h
59670@@ -548,6 +548,8 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
59671 extern bool capable(int cap);
59672 extern bool ns_capable(struct user_namespace *ns, int cap);
59673 extern bool nsown_capable(int cap);
59674+extern bool capable_nolog(int cap);
59675+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
59676
59677 /* audit system wants to get cap info from files as well */
59678 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
59679diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
59680index 42e55de..1cd0e66 100644
59681--- a/include/linux/cleancache.h
59682+++ b/include/linux/cleancache.h
59683@@ -31,7 +31,7 @@ struct cleancache_ops {
59684 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
59685 void (*invalidate_inode)(int, struct cleancache_filekey);
59686 void (*invalidate_fs)(int);
59687-};
59688+} __no_const;
59689
59690 extern struct cleancache_ops
59691 cleancache_register_ops(struct cleancache_ops *ops);
59692diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
59693index 2f40791..a62d196 100644
59694--- a/include/linux/compiler-gcc4.h
59695+++ b/include/linux/compiler-gcc4.h
59696@@ -32,6 +32,16 @@
59697 #define __linktime_error(message) __attribute__((__error__(message)))
59698
59699 #if __GNUC_MINOR__ >= 5
59700+
59701+#ifdef CONSTIFY_PLUGIN
59702+#define __no_const __attribute__((no_const))
59703+#define __do_const __attribute__((do_const))
59704+#endif
59705+
59706+#ifdef SIZE_OVERFLOW_PLUGIN
59707+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
59708+#endif
59709+
59710 /*
59711 * Mark a position in code as unreachable. This can be used to
59712 * suppress control flow warnings after asm blocks that transfer
59713@@ -47,6 +57,11 @@
59714 #define __noclone __attribute__((__noclone__))
59715
59716 #endif
59717+
59718+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
59719+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
59720+#define __bos0(ptr) __bos((ptr), 0)
59721+#define __bos1(ptr) __bos((ptr), 1)
59722 #endif
59723
59724 #if __GNUC_MINOR__ > 0
59725diff --git a/include/linux/compiler.h b/include/linux/compiler.h
59726index 923d093..726c17f 100644
59727--- a/include/linux/compiler.h
59728+++ b/include/linux/compiler.h
59729@@ -5,31 +5,62 @@
59730
59731 #ifdef __CHECKER__
59732 # define __user __attribute__((noderef, address_space(1)))
59733+# define __force_user __force __user
59734 # define __kernel __attribute__((address_space(0)))
59735+# define __force_kernel __force __kernel
59736 # define __safe __attribute__((safe))
59737 # define __force __attribute__((force))
59738 # define __nocast __attribute__((nocast))
59739 # define __iomem __attribute__((noderef, address_space(2)))
59740+# define __force_iomem __force __iomem
59741 # define __acquires(x) __attribute__((context(x,0,1)))
59742 # define __releases(x) __attribute__((context(x,1,0)))
59743 # define __acquire(x) __context__(x,1)
59744 # define __release(x) __context__(x,-1)
59745 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
59746 # define __percpu __attribute__((noderef, address_space(3)))
59747+# define __force_percpu __force __percpu
59748 #ifdef CONFIG_SPARSE_RCU_POINTER
59749 # define __rcu __attribute__((noderef, address_space(4)))
59750+# define __force_rcu __force __rcu
59751 #else
59752 # define __rcu
59753+# define __force_rcu
59754 #endif
59755 extern void __chk_user_ptr(const volatile void __user *);
59756 extern void __chk_io_ptr(const volatile void __iomem *);
59757+#elif defined(CHECKER_PLUGIN)
59758+//# define __user
59759+//# define __force_user
59760+//# define __kernel
59761+//# define __force_kernel
59762+# define __safe
59763+# define __force
59764+# define __nocast
59765+# define __iomem
59766+# define __force_iomem
59767+# define __chk_user_ptr(x) (void)0
59768+# define __chk_io_ptr(x) (void)0
59769+# define __builtin_warning(x, y...) (1)
59770+# define __acquires(x)
59771+# define __releases(x)
59772+# define __acquire(x) (void)0
59773+# define __release(x) (void)0
59774+# define __cond_lock(x,c) (c)
59775+# define __percpu
59776+# define __force_percpu
59777+# define __rcu
59778+# define __force_rcu
59779 #else
59780 # define __user
59781+# define __force_user
59782 # define __kernel
59783+# define __force_kernel
59784 # define __safe
59785 # define __force
59786 # define __nocast
59787 # define __iomem
59788+# define __force_iomem
59789 # define __chk_user_ptr(x) (void)0
59790 # define __chk_io_ptr(x) (void)0
59791 # define __builtin_warning(x, y...) (1)
59792@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
59793 # define __release(x) (void)0
59794 # define __cond_lock(x,c) (c)
59795 # define __percpu
59796+# define __force_percpu
59797 # define __rcu
59798+# define __force_rcu
59799 #endif
59800
59801 #ifdef __KERNEL__
59802@@ -264,6 +297,18 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59803 # define __attribute_const__ /* unimplemented */
59804 #endif
59805
59806+#ifndef __no_const
59807+# define __no_const
59808+#endif
59809+
59810+#ifndef __do_const
59811+# define __do_const
59812+#endif
59813+
59814+#ifndef __size_overflow
59815+# define __size_overflow(...)
59816+#endif
59817+
59818 /*
59819 * Tell gcc if a function is cold. The compiler will assume any path
59820 * directly leading to the call is unlikely.
59821@@ -273,6 +318,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59822 #define __cold
59823 #endif
59824
59825+#ifndef __alloc_size
59826+#define __alloc_size(...)
59827+#endif
59828+
59829+#ifndef __bos
59830+#define __bos(ptr, arg)
59831+#endif
59832+
59833+#ifndef __bos0
59834+#define __bos0(ptr)
59835+#endif
59836+
59837+#ifndef __bos1
59838+#define __bos1(ptr)
59839+#endif
59840+
59841 /* Simple shorthand for a section definition */
59842 #ifndef __section
59843 # define __section(S) __attribute__ ((__section__(#S)))
59844@@ -308,6 +369,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59845 * use is to mediate communication between process-level code and irq/NMI
59846 * handlers, all running on the same CPU.
59847 */
59848-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
59849+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
59850+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
59851
59852 #endif /* __LINUX_COMPILER_H */
59853diff --git a/include/linux/cred.h b/include/linux/cred.h
59854index adadf71..6af5560 100644
59855--- a/include/linux/cred.h
59856+++ b/include/linux/cred.h
59857@@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
59858 static inline void validate_process_creds(void)
59859 {
59860 }
59861+static inline void validate_task_creds(struct task_struct *task)
59862+{
59863+}
59864 #endif
59865
59866 /**
59867diff --git a/include/linux/crypto.h b/include/linux/crypto.h
59868index b92eadf..b4ecdc1 100644
59869--- a/include/linux/crypto.h
59870+++ b/include/linux/crypto.h
59871@@ -373,7 +373,7 @@ struct cipher_tfm {
59872 const u8 *key, unsigned int keylen);
59873 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
59874 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
59875-};
59876+} __no_const;
59877
59878 struct hash_tfm {
59879 int (*init)(struct hash_desc *desc);
59880@@ -394,13 +394,13 @@ struct compress_tfm {
59881 int (*cot_decompress)(struct crypto_tfm *tfm,
59882 const u8 *src, unsigned int slen,
59883 u8 *dst, unsigned int *dlen);
59884-};
59885+} __no_const;
59886
59887 struct rng_tfm {
59888 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
59889 unsigned int dlen);
59890 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
59891-};
59892+} __no_const;
59893
59894 #define crt_ablkcipher crt_u.ablkcipher
59895 #define crt_aead crt_u.aead
59896diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
59897index 7925bf0..d5143d2 100644
59898--- a/include/linux/decompress/mm.h
59899+++ b/include/linux/decompress/mm.h
59900@@ -77,7 +77,7 @@ static void free(void *where)
59901 * warnings when not needed (indeed large_malloc / large_free are not
59902 * needed by inflate */
59903
59904-#define malloc(a) kmalloc(a, GFP_KERNEL)
59905+#define malloc(a) kmalloc((a), GFP_KERNEL)
59906 #define free(a) kfree(a)
59907
59908 #define large_malloc(a) vmalloc(a)
59909diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
59910index dfc099e..e583e66 100644
59911--- a/include/linux/dma-mapping.h
59912+++ b/include/linux/dma-mapping.h
59913@@ -51,7 +51,7 @@ struct dma_map_ops {
59914 u64 (*get_required_mask)(struct device *dev);
59915 #endif
59916 int is_phys;
59917-};
59918+} __do_const;
59919
59920 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
59921
59922diff --git a/include/linux/efi.h b/include/linux/efi.h
59923index ec45ccd..9923c32 100644
59924--- a/include/linux/efi.h
59925+++ b/include/linux/efi.h
59926@@ -635,7 +635,7 @@ struct efivar_operations {
59927 efi_get_variable_t *get_variable;
59928 efi_get_next_variable_t *get_next_variable;
59929 efi_set_variable_t *set_variable;
59930-};
59931+} __no_const;
59932
59933 struct efivars {
59934 /*
59935diff --git a/include/linux/elf.h b/include/linux/elf.h
59936index 999b4f5..57753b4 100644
59937--- a/include/linux/elf.h
59938+++ b/include/linux/elf.h
59939@@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
59940 #define PT_GNU_EH_FRAME 0x6474e550
59941
59942 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
59943+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
59944+
59945+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
59946+
59947+/* Constants for the e_flags field */
59948+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
59949+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
59950+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
59951+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
59952+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
59953+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
59954
59955 /*
59956 * Extended Numbering
59957@@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
59958 #define DT_DEBUG 21
59959 #define DT_TEXTREL 22
59960 #define DT_JMPREL 23
59961+#define DT_FLAGS 30
59962+ #define DF_TEXTREL 0x00000004
59963 #define DT_ENCODING 32
59964 #define OLD_DT_LOOS 0x60000000
59965 #define DT_LOOS 0x6000000d
59966@@ -243,6 +256,19 @@ typedef struct elf64_hdr {
59967 #define PF_W 0x2
59968 #define PF_X 0x1
59969
59970+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
59971+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
59972+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
59973+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
59974+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
59975+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
59976+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
59977+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
59978+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
59979+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
59980+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
59981+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
59982+
59983 typedef struct elf32_phdr{
59984 Elf32_Word p_type;
59985 Elf32_Off p_offset;
59986@@ -335,6 +361,8 @@ typedef struct elf64_shdr {
59987 #define EI_OSABI 7
59988 #define EI_PAD 8
59989
59990+#define EI_PAX 14
59991+
59992 #define ELFMAG0 0x7f /* EI_MAG */
59993 #define ELFMAG1 'E'
59994 #define ELFMAG2 'L'
59995@@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
59996 #define elf_note elf32_note
59997 #define elf_addr_t Elf32_Off
59998 #define Elf_Half Elf32_Half
59999+#define elf_dyn Elf32_Dyn
60000
60001 #else
60002
60003@@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
60004 #define elf_note elf64_note
60005 #define elf_addr_t Elf64_Off
60006 #define Elf_Half Elf64_Half
60007+#define elf_dyn Elf64_Dyn
60008
60009 #endif
60010
60011diff --git a/include/linux/filter.h b/include/linux/filter.h
60012index 8eeb205..d59bfa2 100644
60013--- a/include/linux/filter.h
60014+++ b/include/linux/filter.h
60015@@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
60016
60017 struct sk_buff;
60018 struct sock;
60019+struct bpf_jit_work;
60020
60021 struct sk_filter
60022 {
60023@@ -141,6 +142,9 @@ struct sk_filter
60024 unsigned int len; /* Number of filter blocks */
60025 unsigned int (*bpf_func)(const struct sk_buff *skb,
60026 const struct sock_filter *filter);
60027+#ifdef CONFIG_BPF_JIT
60028+ struct bpf_jit_work *work;
60029+#endif
60030 struct rcu_head rcu;
60031 struct sock_filter insns[0];
60032 };
60033diff --git a/include/linux/firewire.h b/include/linux/firewire.h
60034index cdc9b71..ce69fb5 100644
60035--- a/include/linux/firewire.h
60036+++ b/include/linux/firewire.h
60037@@ -413,7 +413,7 @@ struct fw_iso_context {
60038 union {
60039 fw_iso_callback_t sc;
60040 fw_iso_mc_callback_t mc;
60041- } callback;
60042+ } __no_const callback;
60043 void *callback_data;
60044 };
60045
60046diff --git a/include/linux/fs.h b/include/linux/fs.h
60047index 25c40b9..1bfd4f4 100644
60048--- a/include/linux/fs.h
60049+++ b/include/linux/fs.h
60050@@ -1634,7 +1634,8 @@ struct file_operations {
60051 int (*setlease)(struct file *, long, struct file_lock **);
60052 long (*fallocate)(struct file *file, int mode, loff_t offset,
60053 loff_t len);
60054-};
60055+} __do_const;
60056+typedef struct file_operations __no_const file_operations_no_const;
60057
60058 struct inode_operations {
60059 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
60060diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
60061index 003dc0f..3c4ea97 100644
60062--- a/include/linux/fs_struct.h
60063+++ b/include/linux/fs_struct.h
60064@@ -6,7 +6,7 @@
60065 #include <linux/seqlock.h>
60066
60067 struct fs_struct {
60068- int users;
60069+ atomic_t users;
60070 spinlock_t lock;
60071 seqcount_t seq;
60072 int umask;
60073diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
60074index ce31408..b1ad003 100644
60075--- a/include/linux/fscache-cache.h
60076+++ b/include/linux/fscache-cache.h
60077@@ -102,7 +102,7 @@ struct fscache_operation {
60078 fscache_operation_release_t release;
60079 };
60080
60081-extern atomic_t fscache_op_debug_id;
60082+extern atomic_unchecked_t fscache_op_debug_id;
60083 extern void fscache_op_work_func(struct work_struct *work);
60084
60085 extern void fscache_enqueue_operation(struct fscache_operation *);
60086@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
60087 {
60088 INIT_WORK(&op->work, fscache_op_work_func);
60089 atomic_set(&op->usage, 1);
60090- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
60091+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
60092 op->processor = processor;
60093 op->release = release;
60094 INIT_LIST_HEAD(&op->pend_link);
60095diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
60096index a6dfe69..569586df 100644
60097--- a/include/linux/fsnotify.h
60098+++ b/include/linux/fsnotify.h
60099@@ -315,7 +315,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
60100 */
60101 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
60102 {
60103- return kstrdup(name, GFP_KERNEL);
60104+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
60105 }
60106
60107 /*
60108diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
60109index 91d0e0a3..035666b 100644
60110--- a/include/linux/fsnotify_backend.h
60111+++ b/include/linux/fsnotify_backend.h
60112@@ -105,6 +105,7 @@ struct fsnotify_ops {
60113 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
60114 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
60115 };
60116+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
60117
60118 /*
60119 * A group is a "thing" that wants to receive notification about filesystem
60120diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
60121index 176a939..1462211 100644
60122--- a/include/linux/ftrace_event.h
60123+++ b/include/linux/ftrace_event.h
60124@@ -97,7 +97,7 @@ struct trace_event_functions {
60125 trace_print_func raw;
60126 trace_print_func hex;
60127 trace_print_func binary;
60128-};
60129+} __no_const;
60130
60131 struct trace_event {
60132 struct hlist_node node;
60133@@ -263,7 +263,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
60134 extern int trace_add_event_call(struct ftrace_event_call *call);
60135 extern void trace_remove_event_call(struct ftrace_event_call *call);
60136
60137-#define is_signed_type(type) (((type)(-1)) < 0)
60138+#define is_signed_type(type) (((type)(-1)) < (type)1)
60139
60140 int trace_set_clr_event(const char *system, const char *event, int set);
60141
60142diff --git a/include/linux/genhd.h b/include/linux/genhd.h
60143index 017a7fb..33a8507 100644
60144--- a/include/linux/genhd.h
60145+++ b/include/linux/genhd.h
60146@@ -185,7 +185,7 @@ struct gendisk {
60147 struct kobject *slave_dir;
60148
60149 struct timer_rand_state *random;
60150- atomic_t sync_io; /* RAID */
60151+ atomic_unchecked_t sync_io; /* RAID */
60152 struct disk_events *ev;
60153 #ifdef CONFIG_BLK_DEV_INTEGRITY
60154 struct blk_integrity *integrity;
60155diff --git a/include/linux/gracl.h b/include/linux/gracl.h
60156new file mode 100644
60157index 0000000..c938b1f
60158--- /dev/null
60159+++ b/include/linux/gracl.h
60160@@ -0,0 +1,319 @@
60161+#ifndef GR_ACL_H
60162+#define GR_ACL_H
60163+
60164+#include <linux/grdefs.h>
60165+#include <linux/resource.h>
60166+#include <linux/capability.h>
60167+#include <linux/dcache.h>
60168+#include <asm/resource.h>
60169+
60170+/* Major status information */
60171+
60172+#define GR_VERSION "grsecurity 2.9.1"
60173+#define GRSECURITY_VERSION 0x2901
60174+
60175+enum {
60176+ GR_SHUTDOWN = 0,
60177+ GR_ENABLE = 1,
60178+ GR_SPROLE = 2,
60179+ GR_RELOAD = 3,
60180+ GR_SEGVMOD = 4,
60181+ GR_STATUS = 5,
60182+ GR_UNSPROLE = 6,
60183+ GR_PASSSET = 7,
60184+ GR_SPROLEPAM = 8,
60185+};
60186+
60187+/* Password setup definitions
60188+ * kernel/grhash.c */
60189+enum {
60190+ GR_PW_LEN = 128,
60191+ GR_SALT_LEN = 16,
60192+ GR_SHA_LEN = 32,
60193+};
60194+
60195+enum {
60196+ GR_SPROLE_LEN = 64,
60197+};
60198+
60199+enum {
60200+ GR_NO_GLOB = 0,
60201+ GR_REG_GLOB,
60202+ GR_CREATE_GLOB
60203+};
60204+
60205+#define GR_NLIMITS 32
60206+
60207+/* Begin Data Structures */
60208+
60209+struct sprole_pw {
60210+ unsigned char *rolename;
60211+ unsigned char salt[GR_SALT_LEN];
60212+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
60213+};
60214+
60215+struct name_entry {
60216+ __u32 key;
60217+ ino_t inode;
60218+ dev_t device;
60219+ char *name;
60220+ __u16 len;
60221+ __u8 deleted;
60222+ struct name_entry *prev;
60223+ struct name_entry *next;
60224+};
60225+
60226+struct inodev_entry {
60227+ struct name_entry *nentry;
60228+ struct inodev_entry *prev;
60229+ struct inodev_entry *next;
60230+};
60231+
60232+struct acl_role_db {
60233+ struct acl_role_label **r_hash;
60234+ __u32 r_size;
60235+};
60236+
60237+struct inodev_db {
60238+ struct inodev_entry **i_hash;
60239+ __u32 i_size;
60240+};
60241+
60242+struct name_db {
60243+ struct name_entry **n_hash;
60244+ __u32 n_size;
60245+};
60246+
60247+struct crash_uid {
60248+ uid_t uid;
60249+ unsigned long expires;
60250+};
60251+
60252+struct gr_hash_struct {
60253+ void **table;
60254+ void **nametable;
60255+ void *first;
60256+ __u32 table_size;
60257+ __u32 used_size;
60258+ int type;
60259+};
60260+
60261+/* Userspace Grsecurity ACL data structures */
60262+
60263+struct acl_subject_label {
60264+ char *filename;
60265+ ino_t inode;
60266+ dev_t device;
60267+ __u32 mode;
60268+ kernel_cap_t cap_mask;
60269+ kernel_cap_t cap_lower;
60270+ kernel_cap_t cap_invert_audit;
60271+
60272+ struct rlimit res[GR_NLIMITS];
60273+ __u32 resmask;
60274+
60275+ __u8 user_trans_type;
60276+ __u8 group_trans_type;
60277+ uid_t *user_transitions;
60278+ gid_t *group_transitions;
60279+ __u16 user_trans_num;
60280+ __u16 group_trans_num;
60281+
60282+ __u32 sock_families[2];
60283+ __u32 ip_proto[8];
60284+ __u32 ip_type;
60285+ struct acl_ip_label **ips;
60286+ __u32 ip_num;
60287+ __u32 inaddr_any_override;
60288+
60289+ __u32 crashes;
60290+ unsigned long expires;
60291+
60292+ struct acl_subject_label *parent_subject;
60293+ struct gr_hash_struct *hash;
60294+ struct acl_subject_label *prev;
60295+ struct acl_subject_label *next;
60296+
60297+ struct acl_object_label **obj_hash;
60298+ __u32 obj_hash_size;
60299+ __u16 pax_flags;
60300+};
60301+
60302+struct role_allowed_ip {
60303+ __u32 addr;
60304+ __u32 netmask;
60305+
60306+ struct role_allowed_ip *prev;
60307+ struct role_allowed_ip *next;
60308+};
60309+
60310+struct role_transition {
60311+ char *rolename;
60312+
60313+ struct role_transition *prev;
60314+ struct role_transition *next;
60315+};
60316+
60317+struct acl_role_label {
60318+ char *rolename;
60319+ uid_t uidgid;
60320+ __u16 roletype;
60321+
60322+ __u16 auth_attempts;
60323+ unsigned long expires;
60324+
60325+ struct acl_subject_label *root_label;
60326+ struct gr_hash_struct *hash;
60327+
60328+ struct acl_role_label *prev;
60329+ struct acl_role_label *next;
60330+
60331+ struct role_transition *transitions;
60332+ struct role_allowed_ip *allowed_ips;
60333+ uid_t *domain_children;
60334+ __u16 domain_child_num;
60335+
60336+ umode_t umask;
60337+
60338+ struct acl_subject_label **subj_hash;
60339+ __u32 subj_hash_size;
60340+};
60341+
60342+struct user_acl_role_db {
60343+ struct acl_role_label **r_table;
60344+ __u32 num_pointers; /* Number of allocations to track */
60345+ __u32 num_roles; /* Number of roles */
60346+ __u32 num_domain_children; /* Number of domain children */
60347+ __u32 num_subjects; /* Number of subjects */
60348+ __u32 num_objects; /* Number of objects */
60349+};
60350+
60351+struct acl_object_label {
60352+ char *filename;
60353+ ino_t inode;
60354+ dev_t device;
60355+ __u32 mode;
60356+
60357+ struct acl_subject_label *nested;
60358+ struct acl_object_label *globbed;
60359+
60360+ /* next two structures not used */
60361+
60362+ struct acl_object_label *prev;
60363+ struct acl_object_label *next;
60364+};
60365+
60366+struct acl_ip_label {
60367+ char *iface;
60368+ __u32 addr;
60369+ __u32 netmask;
60370+ __u16 low, high;
60371+ __u8 mode;
60372+ __u32 type;
60373+ __u32 proto[8];
60374+
60375+ /* next two structures not used */
60376+
60377+ struct acl_ip_label *prev;
60378+ struct acl_ip_label *next;
60379+};
60380+
60381+struct gr_arg {
60382+ struct user_acl_role_db role_db;
60383+ unsigned char pw[GR_PW_LEN];
60384+ unsigned char salt[GR_SALT_LEN];
60385+ unsigned char sum[GR_SHA_LEN];
60386+ unsigned char sp_role[GR_SPROLE_LEN];
60387+ struct sprole_pw *sprole_pws;
60388+ dev_t segv_device;
60389+ ino_t segv_inode;
60390+ uid_t segv_uid;
60391+ __u16 num_sprole_pws;
60392+ __u16 mode;
60393+};
60394+
60395+struct gr_arg_wrapper {
60396+ struct gr_arg *arg;
60397+ __u32 version;
60398+ __u32 size;
60399+};
60400+
60401+struct subject_map {
60402+ struct acl_subject_label *user;
60403+ struct acl_subject_label *kernel;
60404+ struct subject_map *prev;
60405+ struct subject_map *next;
60406+};
60407+
60408+struct acl_subj_map_db {
60409+ struct subject_map **s_hash;
60410+ __u32 s_size;
60411+};
60412+
60413+/* End Data Structures Section */
60414+
60415+/* Hash functions generated by empirical testing by Brad Spengler
60416+ Makes good use of the low bits of the inode. Generally 0-1 times
60417+ in loop for successful match. 0-3 for unsuccessful match.
60418+ Shift/add algorithm with modulus of table size and an XOR*/
60419+
60420+static __inline__ unsigned int
60421+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
60422+{
60423+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
60424+}
60425+
60426+ static __inline__ unsigned int
60427+shash(const struct acl_subject_label *userp, const unsigned int sz)
60428+{
60429+ return ((const unsigned long)userp % sz);
60430+}
60431+
60432+static __inline__ unsigned int
60433+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
60434+{
60435+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
60436+}
60437+
60438+static __inline__ unsigned int
60439+nhash(const char *name, const __u16 len, const unsigned int sz)
60440+{
60441+ return full_name_hash((const unsigned char *)name, len) % sz;
60442+}
60443+
60444+#define FOR_EACH_ROLE_START(role) \
60445+ role = role_list; \
60446+ while (role) {
60447+
60448+#define FOR_EACH_ROLE_END(role) \
60449+ role = role->prev; \
60450+ }
60451+
60452+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
60453+ subj = NULL; \
60454+ iter = 0; \
60455+ while (iter < role->subj_hash_size) { \
60456+ if (subj == NULL) \
60457+ subj = role->subj_hash[iter]; \
60458+ if (subj == NULL) { \
60459+ iter++; \
60460+ continue; \
60461+ }
60462+
60463+#define FOR_EACH_SUBJECT_END(subj,iter) \
60464+ subj = subj->next; \
60465+ if (subj == NULL) \
60466+ iter++; \
60467+ }
60468+
60469+
60470+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
60471+ subj = role->hash->first; \
60472+ while (subj != NULL) {
60473+
60474+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
60475+ subj = subj->next; \
60476+ }
60477+
60478+#endif
60479+
60480diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
60481new file mode 100644
60482index 0000000..323ecf2
60483--- /dev/null
60484+++ b/include/linux/gralloc.h
60485@@ -0,0 +1,9 @@
60486+#ifndef __GRALLOC_H
60487+#define __GRALLOC_H
60488+
60489+void acl_free_all(void);
60490+int acl_alloc_stack_init(unsigned long size);
60491+void *acl_alloc(unsigned long len);
60492+void *acl_alloc_num(unsigned long num, unsigned long len);
60493+
60494+#endif
60495diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
60496new file mode 100644
60497index 0000000..b30e9bc
60498--- /dev/null
60499+++ b/include/linux/grdefs.h
60500@@ -0,0 +1,140 @@
60501+#ifndef GRDEFS_H
60502+#define GRDEFS_H
60503+
60504+/* Begin grsecurity status declarations */
60505+
60506+enum {
60507+ GR_READY = 0x01,
60508+ GR_STATUS_INIT = 0x00 // disabled state
60509+};
60510+
60511+/* Begin ACL declarations */
60512+
60513+/* Role flags */
60514+
60515+enum {
60516+ GR_ROLE_USER = 0x0001,
60517+ GR_ROLE_GROUP = 0x0002,
60518+ GR_ROLE_DEFAULT = 0x0004,
60519+ GR_ROLE_SPECIAL = 0x0008,
60520+ GR_ROLE_AUTH = 0x0010,
60521+ GR_ROLE_NOPW = 0x0020,
60522+ GR_ROLE_GOD = 0x0040,
60523+ GR_ROLE_LEARN = 0x0080,
60524+ GR_ROLE_TPE = 0x0100,
60525+ GR_ROLE_DOMAIN = 0x0200,
60526+ GR_ROLE_PAM = 0x0400,
60527+ GR_ROLE_PERSIST = 0x0800
60528+};
60529+
60530+/* ACL Subject and Object mode flags */
60531+enum {
60532+ GR_DELETED = 0x80000000
60533+};
60534+
60535+/* ACL Object-only mode flags */
60536+enum {
60537+ GR_READ = 0x00000001,
60538+ GR_APPEND = 0x00000002,
60539+ GR_WRITE = 0x00000004,
60540+ GR_EXEC = 0x00000008,
60541+ GR_FIND = 0x00000010,
60542+ GR_INHERIT = 0x00000020,
60543+ GR_SETID = 0x00000040,
60544+ GR_CREATE = 0x00000080,
60545+ GR_DELETE = 0x00000100,
60546+ GR_LINK = 0x00000200,
60547+ GR_AUDIT_READ = 0x00000400,
60548+ GR_AUDIT_APPEND = 0x00000800,
60549+ GR_AUDIT_WRITE = 0x00001000,
60550+ GR_AUDIT_EXEC = 0x00002000,
60551+ GR_AUDIT_FIND = 0x00004000,
60552+ GR_AUDIT_INHERIT= 0x00008000,
60553+ GR_AUDIT_SETID = 0x00010000,
60554+ GR_AUDIT_CREATE = 0x00020000,
60555+ GR_AUDIT_DELETE = 0x00040000,
60556+ GR_AUDIT_LINK = 0x00080000,
60557+ GR_PTRACERD = 0x00100000,
60558+ GR_NOPTRACE = 0x00200000,
60559+ GR_SUPPRESS = 0x00400000,
60560+ GR_NOLEARN = 0x00800000,
60561+ GR_INIT_TRANSFER= 0x01000000
60562+};
60563+
60564+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
60565+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
60566+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
60567+
60568+/* ACL subject-only mode flags */
60569+enum {
60570+ GR_KILL = 0x00000001,
60571+ GR_VIEW = 0x00000002,
60572+ GR_PROTECTED = 0x00000004,
60573+ GR_LEARN = 0x00000008,
60574+ GR_OVERRIDE = 0x00000010,
60575+ /* just a placeholder, this mode is only used in userspace */
60576+ GR_DUMMY = 0x00000020,
60577+ GR_PROTSHM = 0x00000040,
60578+ GR_KILLPROC = 0x00000080,
60579+ GR_KILLIPPROC = 0x00000100,
60580+ /* just a placeholder, this mode is only used in userspace */
60581+ GR_NOTROJAN = 0x00000200,
60582+ GR_PROTPROCFD = 0x00000400,
60583+ GR_PROCACCT = 0x00000800,
60584+ GR_RELAXPTRACE = 0x00001000,
60585+ GR_NESTED = 0x00002000,
60586+ GR_INHERITLEARN = 0x00004000,
60587+ GR_PROCFIND = 0x00008000,
60588+ GR_POVERRIDE = 0x00010000,
60589+ GR_KERNELAUTH = 0x00020000,
60590+ GR_ATSECURE = 0x00040000,
60591+ GR_SHMEXEC = 0x00080000
60592+};
60593+
60594+enum {
60595+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
60596+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
60597+ GR_PAX_ENABLE_MPROTECT = 0x0004,
60598+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
60599+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
60600+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
60601+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
60602+ GR_PAX_DISABLE_MPROTECT = 0x0400,
60603+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
60604+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
60605+};
60606+
60607+enum {
60608+ GR_ID_USER = 0x01,
60609+ GR_ID_GROUP = 0x02,
60610+};
60611+
60612+enum {
60613+ GR_ID_ALLOW = 0x01,
60614+ GR_ID_DENY = 0x02,
60615+};
60616+
60617+#define GR_CRASH_RES 31
60618+#define GR_UIDTABLE_MAX 500
60619+
60620+/* begin resource learning section */
60621+enum {
60622+ GR_RLIM_CPU_BUMP = 60,
60623+ GR_RLIM_FSIZE_BUMP = 50000,
60624+ GR_RLIM_DATA_BUMP = 10000,
60625+ GR_RLIM_STACK_BUMP = 1000,
60626+ GR_RLIM_CORE_BUMP = 10000,
60627+ GR_RLIM_RSS_BUMP = 500000,
60628+ GR_RLIM_NPROC_BUMP = 1,
60629+ GR_RLIM_NOFILE_BUMP = 5,
60630+ GR_RLIM_MEMLOCK_BUMP = 50000,
60631+ GR_RLIM_AS_BUMP = 500000,
60632+ GR_RLIM_LOCKS_BUMP = 2,
60633+ GR_RLIM_SIGPENDING_BUMP = 5,
60634+ GR_RLIM_MSGQUEUE_BUMP = 10000,
60635+ GR_RLIM_NICE_BUMP = 1,
60636+ GR_RLIM_RTPRIO_BUMP = 1,
60637+ GR_RLIM_RTTIME_BUMP = 1000000
60638+};
60639+
60640+#endif
60641diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
60642new file mode 100644
60643index 0000000..da390f1
60644--- /dev/null
60645+++ b/include/linux/grinternal.h
60646@@ -0,0 +1,221 @@
60647+#ifndef __GRINTERNAL_H
60648+#define __GRINTERNAL_H
60649+
60650+#ifdef CONFIG_GRKERNSEC
60651+
60652+#include <linux/fs.h>
60653+#include <linux/mnt_namespace.h>
60654+#include <linux/nsproxy.h>
60655+#include <linux/gracl.h>
60656+#include <linux/grdefs.h>
60657+#include <linux/grmsg.h>
60658+
60659+void gr_add_learn_entry(const char *fmt, ...)
60660+ __attribute__ ((format (printf, 1, 2)));
60661+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
60662+ const struct vfsmount *mnt);
60663+__u32 gr_check_create(const struct dentry *new_dentry,
60664+ const struct dentry *parent,
60665+ const struct vfsmount *mnt, const __u32 mode);
60666+int gr_check_protected_task(const struct task_struct *task);
60667+__u32 to_gr_audit(const __u32 reqmode);
60668+int gr_set_acls(const int type);
60669+int gr_apply_subject_to_task(struct task_struct *task);
60670+int gr_acl_is_enabled(void);
60671+char gr_roletype_to_char(void);
60672+
60673+void gr_handle_alertkill(struct task_struct *task);
60674+char *gr_to_filename(const struct dentry *dentry,
60675+ const struct vfsmount *mnt);
60676+char *gr_to_filename1(const struct dentry *dentry,
60677+ const struct vfsmount *mnt);
60678+char *gr_to_filename2(const struct dentry *dentry,
60679+ const struct vfsmount *mnt);
60680+char *gr_to_filename3(const struct dentry *dentry,
60681+ const struct vfsmount *mnt);
60682+
60683+extern int grsec_enable_ptrace_readexec;
60684+extern int grsec_enable_harden_ptrace;
60685+extern int grsec_enable_link;
60686+extern int grsec_enable_fifo;
60687+extern int grsec_enable_execve;
60688+extern int grsec_enable_shm;
60689+extern int grsec_enable_execlog;
60690+extern int grsec_enable_signal;
60691+extern int grsec_enable_audit_ptrace;
60692+extern int grsec_enable_forkfail;
60693+extern int grsec_enable_time;
60694+extern int grsec_enable_rofs;
60695+extern int grsec_enable_chroot_shmat;
60696+extern int grsec_enable_chroot_mount;
60697+extern int grsec_enable_chroot_double;
60698+extern int grsec_enable_chroot_pivot;
60699+extern int grsec_enable_chroot_chdir;
60700+extern int grsec_enable_chroot_chmod;
60701+extern int grsec_enable_chroot_mknod;
60702+extern int grsec_enable_chroot_fchdir;
60703+extern int grsec_enable_chroot_nice;
60704+extern int grsec_enable_chroot_execlog;
60705+extern int grsec_enable_chroot_caps;
60706+extern int grsec_enable_chroot_sysctl;
60707+extern int grsec_enable_chroot_unix;
60708+extern int grsec_enable_tpe;
60709+extern int grsec_tpe_gid;
60710+extern int grsec_enable_tpe_all;
60711+extern int grsec_enable_tpe_invert;
60712+extern int grsec_enable_socket_all;
60713+extern int grsec_socket_all_gid;
60714+extern int grsec_enable_socket_client;
60715+extern int grsec_socket_client_gid;
60716+extern int grsec_enable_socket_server;
60717+extern int grsec_socket_server_gid;
60718+extern int grsec_audit_gid;
60719+extern int grsec_enable_group;
60720+extern int grsec_enable_audit_textrel;
60721+extern int grsec_enable_log_rwxmaps;
60722+extern int grsec_enable_mount;
60723+extern int grsec_enable_chdir;
60724+extern int grsec_resource_logging;
60725+extern int grsec_enable_blackhole;
60726+extern int grsec_lastack_retries;
60727+extern int grsec_enable_brute;
60728+extern int grsec_lock;
60729+
60730+extern spinlock_t grsec_alert_lock;
60731+extern unsigned long grsec_alert_wtime;
60732+extern unsigned long grsec_alert_fyet;
60733+
60734+extern spinlock_t grsec_audit_lock;
60735+
60736+extern rwlock_t grsec_exec_file_lock;
60737+
60738+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
60739+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
60740+ (tsk)->exec_file->f_vfsmnt) : "/")
60741+
60742+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
60743+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
60744+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
60745+
60746+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
60747+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
60748+ (tsk)->exec_file->f_vfsmnt) : "/")
60749+
60750+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
60751+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
60752+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
60753+
60754+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
60755+
60756+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
60757+
60758+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
60759+ (task)->pid, (cred)->uid, \
60760+ (cred)->euid, (cred)->gid, (cred)->egid, \
60761+ gr_parent_task_fullpath(task), \
60762+ (task)->real_parent->comm, (task)->real_parent->pid, \
60763+ (pcred)->uid, (pcred)->euid, \
60764+ (pcred)->gid, (pcred)->egid
60765+
60766+#define GR_CHROOT_CAPS {{ \
60767+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
60768+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
60769+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
60770+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
60771+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
60772+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
60773+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
60774+
60775+#define security_learn(normal_msg,args...) \
60776+({ \
60777+ read_lock(&grsec_exec_file_lock); \
60778+ gr_add_learn_entry(normal_msg "\n", ## args); \
60779+ read_unlock(&grsec_exec_file_lock); \
60780+})
60781+
60782+enum {
60783+ GR_DO_AUDIT,
60784+ GR_DONT_AUDIT,
60785+ /* used for non-audit messages that we shouldn't kill the task on */
60786+ GR_DONT_AUDIT_GOOD
60787+};
60788+
60789+enum {
60790+ GR_TTYSNIFF,
60791+ GR_RBAC,
60792+ GR_RBAC_STR,
60793+ GR_STR_RBAC,
60794+ GR_RBAC_MODE2,
60795+ GR_RBAC_MODE3,
60796+ GR_FILENAME,
60797+ GR_SYSCTL_HIDDEN,
60798+ GR_NOARGS,
60799+ GR_ONE_INT,
60800+ GR_ONE_INT_TWO_STR,
60801+ GR_ONE_STR,
60802+ GR_STR_INT,
60803+ GR_TWO_STR_INT,
60804+ GR_TWO_INT,
60805+ GR_TWO_U64,
60806+ GR_THREE_INT,
60807+ GR_FIVE_INT_TWO_STR,
60808+ GR_TWO_STR,
60809+ GR_THREE_STR,
60810+ GR_FOUR_STR,
60811+ GR_STR_FILENAME,
60812+ GR_FILENAME_STR,
60813+ GR_FILENAME_TWO_INT,
60814+ GR_FILENAME_TWO_INT_STR,
60815+ GR_TEXTREL,
60816+ GR_PTRACE,
60817+ GR_RESOURCE,
60818+ GR_CAP,
60819+ GR_SIG,
60820+ GR_SIG2,
60821+ GR_CRASH1,
60822+ GR_CRASH2,
60823+ GR_PSACCT,
60824+ GR_RWXMAP
60825+};
60826+
60827+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
60828+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
60829+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
60830+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
60831+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
60832+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
60833+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
60834+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
60835+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
60836+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
60837+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
60838+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
60839+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
60840+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
60841+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
60842+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
60843+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
60844+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
60845+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
60846+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
60847+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
60848+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
60849+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
60850+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
60851+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
60852+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
60853+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
60854+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
60855+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
60856+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
60857+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
60858+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
60859+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
60860+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
60861+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
60862+
60863+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
60864+
60865+#endif
60866+
60867+#endif
60868diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
60869new file mode 100644
60870index 0000000..ae576a1
60871--- /dev/null
60872+++ b/include/linux/grmsg.h
60873@@ -0,0 +1,109 @@
60874+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
60875+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
60876+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
60877+#define GR_STOPMOD_MSG "denied modification of module state by "
60878+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
60879+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
60880+#define GR_IOPERM_MSG "denied use of ioperm() by "
60881+#define GR_IOPL_MSG "denied use of iopl() by "
60882+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
60883+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
60884+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
60885+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
60886+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
60887+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
60888+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
60889+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
60890+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
60891+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
60892+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
60893+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
60894+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
60895+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
60896+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
60897+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
60898+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
60899+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
60900+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
60901+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
60902+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
60903+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
60904+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
60905+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
60906+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
60907+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
60908+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
60909+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
60910+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
60911+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
60912+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
60913+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
60914+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
60915+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
60916+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
60917+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
60918+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
60919+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
60920+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
60921+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
60922+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
60923+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
60924+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
60925+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
60926+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
60927+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
60928+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
60929+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
60930+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
60931+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
60932+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
60933+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
60934+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
60935+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
60936+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
60937+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
60938+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
60939+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
60940+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
60941+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
60942+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
60943+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
60944+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
60945+#define GR_FAILFORK_MSG "failed fork with errno %s by "
60946+#define GR_NICE_CHROOT_MSG "denied priority change by "
60947+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
60948+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
60949+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
60950+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
60951+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
60952+#define GR_TIME_MSG "time set by "
60953+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
60954+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
60955+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
60956+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
60957+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
60958+#define GR_BIND_MSG "denied bind() by "
60959+#define GR_CONNECT_MSG "denied connect() by "
60960+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
60961+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
60962+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
60963+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
60964+#define GR_CAP_ACL_MSG "use of %s denied for "
60965+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
60966+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
60967+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
60968+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
60969+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
60970+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
60971+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
60972+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
60973+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
60974+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
60975+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
60976+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
60977+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
60978+#define GR_VM86_MSG "denied use of vm86 by "
60979+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
60980+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
60981+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
60982+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
60983diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
60984new file mode 100644
60985index 0000000..acd05db
60986--- /dev/null
60987+++ b/include/linux/grsecurity.h
60988@@ -0,0 +1,232 @@
60989+#ifndef GR_SECURITY_H
60990+#define GR_SECURITY_H
60991+#include <linux/fs.h>
60992+#include <linux/fs_struct.h>
60993+#include <linux/binfmts.h>
60994+#include <linux/gracl.h>
60995+
60996+/* notify of brain-dead configs */
60997+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60998+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
60999+#endif
61000+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
61001+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
61002+#endif
61003+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
61004+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
61005+#endif
61006+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
61007+#error "CONFIG_PAX enabled, but no PaX options are enabled."
61008+#endif
61009+
61010+#include <linux/compat.h>
61011+
61012+struct user_arg_ptr {
61013+#ifdef CONFIG_COMPAT
61014+ bool is_compat;
61015+#endif
61016+ union {
61017+ const char __user *const __user *native;
61018+#ifdef CONFIG_COMPAT
61019+ compat_uptr_t __user *compat;
61020+#endif
61021+ } ptr;
61022+};
61023+
61024+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
61025+void gr_handle_brute_check(void);
61026+void gr_handle_kernel_exploit(void);
61027+int gr_process_user_ban(void);
61028+
61029+char gr_roletype_to_char(void);
61030+
61031+int gr_acl_enable_at_secure(void);
61032+
61033+int gr_check_user_change(int real, int effective, int fs);
61034+int gr_check_group_change(int real, int effective, int fs);
61035+
61036+void gr_del_task_from_ip_table(struct task_struct *p);
61037+
61038+int gr_pid_is_chrooted(struct task_struct *p);
61039+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
61040+int gr_handle_chroot_nice(void);
61041+int gr_handle_chroot_sysctl(const int op);
61042+int gr_handle_chroot_setpriority(struct task_struct *p,
61043+ const int niceval);
61044+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
61045+int gr_handle_chroot_chroot(const struct dentry *dentry,
61046+ const struct vfsmount *mnt);
61047+void gr_handle_chroot_chdir(struct path *path);
61048+int gr_handle_chroot_chmod(const struct dentry *dentry,
61049+ const struct vfsmount *mnt, const int mode);
61050+int gr_handle_chroot_mknod(const struct dentry *dentry,
61051+ const struct vfsmount *mnt, const int mode);
61052+int gr_handle_chroot_mount(const struct dentry *dentry,
61053+ const struct vfsmount *mnt,
61054+ const char *dev_name);
61055+int gr_handle_chroot_pivot(void);
61056+int gr_handle_chroot_unix(const pid_t pid);
61057+
61058+int gr_handle_rawio(const struct inode *inode);
61059+
61060+void gr_handle_ioperm(void);
61061+void gr_handle_iopl(void);
61062+
61063+umode_t gr_acl_umask(void);
61064+
61065+int gr_tpe_allow(const struct file *file);
61066+
61067+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
61068+void gr_clear_chroot_entries(struct task_struct *task);
61069+
61070+void gr_log_forkfail(const int retval);
61071+void gr_log_timechange(void);
61072+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
61073+void gr_log_chdir(const struct dentry *dentry,
61074+ const struct vfsmount *mnt);
61075+void gr_log_chroot_exec(const struct dentry *dentry,
61076+ const struct vfsmount *mnt);
61077+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
61078+void gr_log_remount(const char *devname, const int retval);
61079+void gr_log_unmount(const char *devname, const int retval);
61080+void gr_log_mount(const char *from, const char *to, const int retval);
61081+void gr_log_textrel(struct vm_area_struct *vma);
61082+void gr_log_rwxmmap(struct file *file);
61083+void gr_log_rwxmprotect(struct file *file);
61084+
61085+int gr_handle_follow_link(const struct inode *parent,
61086+ const struct inode *inode,
61087+ const struct dentry *dentry,
61088+ const struct vfsmount *mnt);
61089+int gr_handle_fifo(const struct dentry *dentry,
61090+ const struct vfsmount *mnt,
61091+ const struct dentry *dir, const int flag,
61092+ const int acc_mode);
61093+int gr_handle_hardlink(const struct dentry *dentry,
61094+ const struct vfsmount *mnt,
61095+ struct inode *inode,
61096+ const int mode, const char *to);
61097+
61098+int gr_is_capable(const int cap);
61099+int gr_is_capable_nolog(const int cap);
61100+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
61101+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
61102+
61103+void gr_learn_resource(const struct task_struct *task, const int limit,
61104+ const unsigned long wanted, const int gt);
61105+void gr_copy_label(struct task_struct *tsk);
61106+void gr_handle_crash(struct task_struct *task, const int sig);
61107+int gr_handle_signal(const struct task_struct *p, const int sig);
61108+int gr_check_crash_uid(const uid_t uid);
61109+int gr_check_protected_task(const struct task_struct *task);
61110+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
61111+int gr_acl_handle_mmap(const struct file *file,
61112+ const unsigned long prot);
61113+int gr_acl_handle_mprotect(const struct file *file,
61114+ const unsigned long prot);
61115+int gr_check_hidden_task(const struct task_struct *tsk);
61116+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
61117+ const struct vfsmount *mnt);
61118+__u32 gr_acl_handle_utime(const struct dentry *dentry,
61119+ const struct vfsmount *mnt);
61120+__u32 gr_acl_handle_access(const struct dentry *dentry,
61121+ const struct vfsmount *mnt, const int fmode);
61122+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
61123+ const struct vfsmount *mnt, umode_t *mode);
61124+__u32 gr_acl_handle_chown(const struct dentry *dentry,
61125+ const struct vfsmount *mnt);
61126+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
61127+ const struct vfsmount *mnt);
61128+int gr_handle_ptrace(struct task_struct *task, const long request);
61129+int gr_handle_proc_ptrace(struct task_struct *task);
61130+__u32 gr_acl_handle_execve(const struct dentry *dentry,
61131+ const struct vfsmount *mnt);
61132+int gr_check_crash_exec(const struct file *filp);
61133+int gr_acl_is_enabled(void);
61134+void gr_set_kernel_label(struct task_struct *task);
61135+void gr_set_role_label(struct task_struct *task, const uid_t uid,
61136+ const gid_t gid);
61137+int gr_set_proc_label(const struct dentry *dentry,
61138+ const struct vfsmount *mnt,
61139+ const int unsafe_flags);
61140+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
61141+ const struct vfsmount *mnt);
61142+__u32 gr_acl_handle_open(const struct dentry *dentry,
61143+ const struct vfsmount *mnt, int acc_mode);
61144+__u32 gr_acl_handle_creat(const struct dentry *dentry,
61145+ const struct dentry *p_dentry,
61146+ const struct vfsmount *p_mnt,
61147+ int open_flags, int acc_mode, const int imode);
61148+void gr_handle_create(const struct dentry *dentry,
61149+ const struct vfsmount *mnt);
61150+void gr_handle_proc_create(const struct dentry *dentry,
61151+ const struct inode *inode);
61152+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
61153+ const struct dentry *parent_dentry,
61154+ const struct vfsmount *parent_mnt,
61155+ const int mode);
61156+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
61157+ const struct dentry *parent_dentry,
61158+ const struct vfsmount *parent_mnt);
61159+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
61160+ const struct vfsmount *mnt);
61161+void gr_handle_delete(const ino_t ino, const dev_t dev);
61162+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
61163+ const struct vfsmount *mnt);
61164+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
61165+ const struct dentry *parent_dentry,
61166+ const struct vfsmount *parent_mnt,
61167+ const char *from);
61168+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
61169+ const struct dentry *parent_dentry,
61170+ const struct vfsmount *parent_mnt,
61171+ const struct dentry *old_dentry,
61172+ const struct vfsmount *old_mnt, const char *to);
61173+int gr_acl_handle_rename(struct dentry *new_dentry,
61174+ struct dentry *parent_dentry,
61175+ const struct vfsmount *parent_mnt,
61176+ struct dentry *old_dentry,
61177+ struct inode *old_parent_inode,
61178+ struct vfsmount *old_mnt, const char *newname);
61179+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61180+ struct dentry *old_dentry,
61181+ struct dentry *new_dentry,
61182+ struct vfsmount *mnt, const __u8 replace);
61183+__u32 gr_check_link(const struct dentry *new_dentry,
61184+ const struct dentry *parent_dentry,
61185+ const struct vfsmount *parent_mnt,
61186+ const struct dentry *old_dentry,
61187+ const struct vfsmount *old_mnt);
61188+int gr_acl_handle_filldir(const struct file *file, const char *name,
61189+ const unsigned int namelen, const ino_t ino);
61190+
61191+__u32 gr_acl_handle_unix(const struct dentry *dentry,
61192+ const struct vfsmount *mnt);
61193+void gr_acl_handle_exit(void);
61194+void gr_acl_handle_psacct(struct task_struct *task, const long code);
61195+int gr_acl_handle_procpidmem(const struct task_struct *task);
61196+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
61197+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
61198+void gr_audit_ptrace(struct task_struct *task);
61199+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
61200+
61201+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
61202+
61203+#ifdef CONFIG_GRKERNSEC
61204+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
61205+void gr_handle_vm86(void);
61206+void gr_handle_mem_readwrite(u64 from, u64 to);
61207+
61208+void gr_log_badprocpid(const char *entry);
61209+
61210+extern int grsec_enable_dmesg;
61211+extern int grsec_disable_privio;
61212+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61213+extern int grsec_enable_chroot_findtask;
61214+#endif
61215+#ifdef CONFIG_GRKERNSEC_SETXID
61216+extern int grsec_enable_setxid;
61217+#endif
61218+#endif
61219+
61220+#endif
61221diff --git a/include/linux/grsock.h b/include/linux/grsock.h
61222new file mode 100644
61223index 0000000..e7ffaaf
61224--- /dev/null
61225+++ b/include/linux/grsock.h
61226@@ -0,0 +1,19 @@
61227+#ifndef __GRSOCK_H
61228+#define __GRSOCK_H
61229+
61230+extern void gr_attach_curr_ip(const struct sock *sk);
61231+extern int gr_handle_sock_all(const int family, const int type,
61232+ const int protocol);
61233+extern int gr_handle_sock_server(const struct sockaddr *sck);
61234+extern int gr_handle_sock_server_other(const struct sock *sck);
61235+extern int gr_handle_sock_client(const struct sockaddr *sck);
61236+extern int gr_search_connect(struct socket * sock,
61237+ struct sockaddr_in * addr);
61238+extern int gr_search_bind(struct socket * sock,
61239+ struct sockaddr_in * addr);
61240+extern int gr_search_listen(struct socket * sock);
61241+extern int gr_search_accept(struct socket * sock);
61242+extern int gr_search_socket(const int domain, const int type,
61243+ const int protocol);
61244+
61245+#endif
61246diff --git a/include/linux/hid.h b/include/linux/hid.h
61247index 3a95da6..51986f1 100644
61248--- a/include/linux/hid.h
61249+++ b/include/linux/hid.h
61250@@ -696,7 +696,7 @@ struct hid_ll_driver {
61251 unsigned int code, int value);
61252
61253 int (*parse)(struct hid_device *hdev);
61254-};
61255+} __no_const;
61256
61257 #define PM_HINT_FULLON 1<<5
61258 #define PM_HINT_NORMAL 1<<1
61259diff --git a/include/linux/highmem.h b/include/linux/highmem.h
61260index d3999b4..1304cb4 100644
61261--- a/include/linux/highmem.h
61262+++ b/include/linux/highmem.h
61263@@ -221,6 +221,18 @@ static inline void clear_highpage(struct page *page)
61264 kunmap_atomic(kaddr);
61265 }
61266
61267+static inline void sanitize_highpage(struct page *page)
61268+{
61269+ void *kaddr;
61270+ unsigned long flags;
61271+
61272+ local_irq_save(flags);
61273+ kaddr = kmap_atomic(page);
61274+ clear_page(kaddr);
61275+ kunmap_atomic(kaddr);
61276+ local_irq_restore(flags);
61277+}
61278+
61279 static inline void zero_user_segments(struct page *page,
61280 unsigned start1, unsigned end1,
61281 unsigned start2, unsigned end2)
61282diff --git a/include/linux/i2c.h b/include/linux/i2c.h
61283index 195d8b3..e20cfab 100644
61284--- a/include/linux/i2c.h
61285+++ b/include/linux/i2c.h
61286@@ -365,6 +365,7 @@ struct i2c_algorithm {
61287 /* To determine what the adapter supports */
61288 u32 (*functionality) (struct i2c_adapter *);
61289 };
61290+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
61291
61292 /*
61293 * i2c_adapter is the structure used to identify a physical i2c bus along
61294diff --git a/include/linux/i2o.h b/include/linux/i2o.h
61295index d23c3c2..eb63c81 100644
61296--- a/include/linux/i2o.h
61297+++ b/include/linux/i2o.h
61298@@ -565,7 +565,7 @@ struct i2o_controller {
61299 struct i2o_device *exec; /* Executive */
61300 #if BITS_PER_LONG == 64
61301 spinlock_t context_list_lock; /* lock for context_list */
61302- atomic_t context_list_counter; /* needed for unique contexts */
61303+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
61304 struct list_head context_list; /* list of context id's
61305 and pointers */
61306 #endif
61307diff --git a/include/linux/if_team.h b/include/linux/if_team.h
61308index 58404b0..439ed95 100644
61309--- a/include/linux/if_team.h
61310+++ b/include/linux/if_team.h
61311@@ -64,6 +64,7 @@ struct team_mode_ops {
61312 void (*port_leave)(struct team *team, struct team_port *port);
61313 void (*port_change_mac)(struct team *team, struct team_port *port);
61314 };
61315+typedef struct team_mode_ops __no_const team_mode_ops_no_const;
61316
61317 enum team_option_type {
61318 TEAM_OPTION_TYPE_U32,
61319@@ -112,7 +113,7 @@ struct team {
61320 struct list_head option_list;
61321
61322 const struct team_mode *mode;
61323- struct team_mode_ops ops;
61324+ team_mode_ops_no_const ops;
61325 long mode_priv[TEAM_MODE_PRIV_LONGS];
61326 };
61327
61328diff --git a/include/linux/init.h b/include/linux/init.h
61329index 6b95109..4aca62c 100644
61330--- a/include/linux/init.h
61331+++ b/include/linux/init.h
61332@@ -294,13 +294,13 @@ void __init parse_early_options(char *cmdline);
61333
61334 /* Each module must use one module_init(). */
61335 #define module_init(initfn) \
61336- static inline initcall_t __inittest(void) \
61337+ static inline __used initcall_t __inittest(void) \
61338 { return initfn; } \
61339 int init_module(void) __attribute__((alias(#initfn)));
61340
61341 /* This is only required if you want to be unloadable. */
61342 #define module_exit(exitfn) \
61343- static inline exitcall_t __exittest(void) \
61344+ static inline __used exitcall_t __exittest(void) \
61345 { return exitfn; } \
61346 void cleanup_module(void) __attribute__((alias(#exitfn)));
61347
61348diff --git a/include/linux/init_task.h b/include/linux/init_task.h
61349index e4baff5..83bb175 100644
61350--- a/include/linux/init_task.h
61351+++ b/include/linux/init_task.h
61352@@ -134,6 +134,12 @@ extern struct cred init_cred;
61353
61354 #define INIT_TASK_COMM "swapper"
61355
61356+#ifdef CONFIG_X86
61357+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
61358+#else
61359+#define INIT_TASK_THREAD_INFO
61360+#endif
61361+
61362 /*
61363 * INIT_TASK is used to set up the first task table, touch at
61364 * your own risk!. Base=0, limit=0x1fffff (=2MB)
61365@@ -172,6 +178,7 @@ extern struct cred init_cred;
61366 RCU_INIT_POINTER(.cred, &init_cred), \
61367 .comm = INIT_TASK_COMM, \
61368 .thread = INIT_THREAD, \
61369+ INIT_TASK_THREAD_INFO \
61370 .fs = &init_fs, \
61371 .files = &init_files, \
61372 .signal = &init_signals, \
61373diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
61374index e6ca56d..8583707 100644
61375--- a/include/linux/intel-iommu.h
61376+++ b/include/linux/intel-iommu.h
61377@@ -296,7 +296,7 @@ struct iommu_flush {
61378 u8 fm, u64 type);
61379 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
61380 unsigned int size_order, u64 type);
61381-};
61382+} __no_const;
61383
61384 enum {
61385 SR_DMAR_FECTL_REG,
61386diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
61387index 2aea5d2..0b82f0c 100644
61388--- a/include/linux/interrupt.h
61389+++ b/include/linux/interrupt.h
61390@@ -439,7 +439,7 @@ enum
61391 /* map softirq index to softirq name. update 'softirq_to_name' in
61392 * kernel/softirq.c when adding a new softirq.
61393 */
61394-extern char *softirq_to_name[NR_SOFTIRQS];
61395+extern const char * const softirq_to_name[NR_SOFTIRQS];
61396
61397 /* softirq mask and active fields moved to irq_cpustat_t in
61398 * asm/hardirq.h to get better cache usage. KAO
61399@@ -447,12 +447,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
61400
61401 struct softirq_action
61402 {
61403- void (*action)(struct softirq_action *);
61404+ void (*action)(void);
61405 };
61406
61407 asmlinkage void do_softirq(void);
61408 asmlinkage void __do_softirq(void);
61409-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
61410+extern void open_softirq(int nr, void (*action)(void));
61411 extern void softirq_init(void);
61412 extern void __raise_softirq_irqoff(unsigned int nr);
61413
61414diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
61415index 3875719..4cd454c 100644
61416--- a/include/linux/kallsyms.h
61417+++ b/include/linux/kallsyms.h
61418@@ -15,7 +15,8 @@
61419
61420 struct module;
61421
61422-#ifdef CONFIG_KALLSYMS
61423+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
61424+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
61425 /* Lookup the address for a symbol. Returns 0 if not found. */
61426 unsigned long kallsyms_lookup_name(const char *name);
61427
61428@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
61429 /* Stupid that this does nothing, but I didn't create this mess. */
61430 #define __print_symbol(fmt, addr)
61431 #endif /*CONFIG_KALLSYMS*/
61432+#else /* when included by kallsyms.c, vsnprintf.c, or
61433+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
61434+extern void __print_symbol(const char *fmt, unsigned long address);
61435+extern int sprint_backtrace(char *buffer, unsigned long address);
61436+extern int sprint_symbol(char *buffer, unsigned long address);
61437+const char *kallsyms_lookup(unsigned long addr,
61438+ unsigned long *symbolsize,
61439+ unsigned long *offset,
61440+ char **modname, char *namebuf);
61441+#endif
61442
61443 /* This macro allows us to keep printk typechecking */
61444 static __printf(1, 2)
61445diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
61446index c4d2fc1..5df9c19 100644
61447--- a/include/linux/kgdb.h
61448+++ b/include/linux/kgdb.h
61449@@ -53,7 +53,7 @@ extern int kgdb_connected;
61450 extern int kgdb_io_module_registered;
61451
61452 extern atomic_t kgdb_setting_breakpoint;
61453-extern atomic_t kgdb_cpu_doing_single_step;
61454+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
61455
61456 extern struct task_struct *kgdb_usethread;
61457 extern struct task_struct *kgdb_contthread;
61458@@ -252,7 +252,7 @@ struct kgdb_arch {
61459 void (*disable_hw_break)(struct pt_regs *regs);
61460 void (*remove_all_hw_break)(void);
61461 void (*correct_hw_break)(void);
61462-};
61463+} __do_const;
61464
61465 /**
61466 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
61467@@ -277,7 +277,7 @@ struct kgdb_io {
61468 void (*pre_exception) (void);
61469 void (*post_exception) (void);
61470 int is_console;
61471-};
61472+} __do_const;
61473
61474 extern struct kgdb_arch arch_kgdb_ops;
61475
61476diff --git a/include/linux/kmod.h b/include/linux/kmod.h
61477index dd99c32..da06047 100644
61478--- a/include/linux/kmod.h
61479+++ b/include/linux/kmod.h
61480@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
61481 * usually useless though. */
61482 extern __printf(2, 3)
61483 int __request_module(bool wait, const char *name, ...);
61484+extern __printf(3, 4)
61485+int ___request_module(bool wait, char *param_name, const char *name, ...);
61486 #define request_module(mod...) __request_module(true, mod)
61487 #define request_module_nowait(mod...) __request_module(false, mod)
61488 #define try_then_request_module(x, mod...) \
61489diff --git a/include/linux/kref.h b/include/linux/kref.h
61490index 9c07dce..a92fa71 100644
61491--- a/include/linux/kref.h
61492+++ b/include/linux/kref.h
61493@@ -63,7 +63,7 @@ static inline void kref_get(struct kref *kref)
61494 static inline int kref_sub(struct kref *kref, unsigned int count,
61495 void (*release)(struct kref *kref))
61496 {
61497- WARN_ON(release == NULL);
61498+ BUG_ON(release == NULL);
61499
61500 if (atomic_sub_and_test((int) count, &kref->refcount)) {
61501 release(kref);
61502diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
61503index 72cbf08..dd0201d 100644
61504--- a/include/linux/kvm_host.h
61505+++ b/include/linux/kvm_host.h
61506@@ -322,7 +322,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
61507 void vcpu_load(struct kvm_vcpu *vcpu);
61508 void vcpu_put(struct kvm_vcpu *vcpu);
61509
61510-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61511+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61512 struct module *module);
61513 void kvm_exit(void);
61514
61515@@ -486,7 +486,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
61516 struct kvm_guest_debug *dbg);
61517 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
61518
61519-int kvm_arch_init(void *opaque);
61520+int kvm_arch_init(const void *opaque);
61521 void kvm_arch_exit(void);
61522
61523 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
61524diff --git a/include/linux/libata.h b/include/linux/libata.h
61525index 6e887c7..4539601 100644
61526--- a/include/linux/libata.h
61527+++ b/include/linux/libata.h
61528@@ -910,7 +910,7 @@ struct ata_port_operations {
61529 * fields must be pointers.
61530 */
61531 const struct ata_port_operations *inherits;
61532-};
61533+} __do_const;
61534
61535 struct ata_port_info {
61536 unsigned long flags;
61537diff --git a/include/linux/mca.h b/include/linux/mca.h
61538index 3797270..7765ede 100644
61539--- a/include/linux/mca.h
61540+++ b/include/linux/mca.h
61541@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
61542 int region);
61543 void * (*mca_transform_memory)(struct mca_device *,
61544 void *memory);
61545-};
61546+} __no_const;
61547
61548 struct mca_bus {
61549 u64 default_dma_mask;
61550diff --git a/include/linux/memory.h b/include/linux/memory.h
61551index 1ac7f6e..a5794d0 100644
61552--- a/include/linux/memory.h
61553+++ b/include/linux/memory.h
61554@@ -143,7 +143,7 @@ struct memory_accessor {
61555 size_t count);
61556 ssize_t (*write)(struct memory_accessor *, const char *buf,
61557 off_t offset, size_t count);
61558-};
61559+} __no_const;
61560
61561 /*
61562 * Kernel text modification mutex, used for code patching. Users of this lock
61563diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
61564index ee96cd5..7823c3a 100644
61565--- a/include/linux/mfd/abx500.h
61566+++ b/include/linux/mfd/abx500.h
61567@@ -455,6 +455,7 @@ struct abx500_ops {
61568 int (*event_registers_startup_state_get) (struct device *, u8 *);
61569 int (*startup_irq_enabled) (struct device *, unsigned int);
61570 };
61571+typedef struct abx500_ops __no_const abx500_ops_no_const;
61572
61573 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
61574 void abx500_remove_ops(struct device *dev);
61575diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
61576index 9b07725..3d55001 100644
61577--- a/include/linux/mfd/abx500/ux500_chargalg.h
61578+++ b/include/linux/mfd/abx500/ux500_chargalg.h
61579@@ -19,7 +19,7 @@ struct ux500_charger_ops {
61580 int (*enable) (struct ux500_charger *, int, int, int);
61581 int (*kick_wd) (struct ux500_charger *);
61582 int (*update_curr) (struct ux500_charger *, int);
61583-};
61584+} __no_const;
61585
61586 /**
61587 * struct ux500_charger - power supply ux500 charger sub class
61588diff --git a/include/linux/mm.h b/include/linux/mm.h
61589index 74aa71b..4ae97ba 100644
61590--- a/include/linux/mm.h
61591+++ b/include/linux/mm.h
61592@@ -116,7 +116,14 @@ extern unsigned int kobjsize(const void *objp);
61593
61594 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
61595 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
61596+
61597+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61598+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
61599+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
61600+#else
61601 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
61602+#endif
61603+
61604 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
61605 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
61606
61607@@ -1013,34 +1020,6 @@ int set_page_dirty(struct page *page);
61608 int set_page_dirty_lock(struct page *page);
61609 int clear_page_dirty_for_io(struct page *page);
61610
61611-/* Is the vma a continuation of the stack vma above it? */
61612-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
61613-{
61614- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
61615-}
61616-
61617-static inline int stack_guard_page_start(struct vm_area_struct *vma,
61618- unsigned long addr)
61619-{
61620- return (vma->vm_flags & VM_GROWSDOWN) &&
61621- (vma->vm_start == addr) &&
61622- !vma_growsdown(vma->vm_prev, addr);
61623-}
61624-
61625-/* Is the vma a continuation of the stack vma below it? */
61626-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
61627-{
61628- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
61629-}
61630-
61631-static inline int stack_guard_page_end(struct vm_area_struct *vma,
61632- unsigned long addr)
61633-{
61634- return (vma->vm_flags & VM_GROWSUP) &&
61635- (vma->vm_end == addr) &&
61636- !vma_growsup(vma->vm_next, addr);
61637-}
61638-
61639 extern pid_t
61640 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
61641
61642@@ -1139,6 +1118,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
61643 }
61644 #endif
61645
61646+#ifdef CONFIG_MMU
61647+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
61648+#else
61649+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
61650+{
61651+ return __pgprot(0);
61652+}
61653+#endif
61654+
61655 int vma_wants_writenotify(struct vm_area_struct *vma);
61656
61657 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
61658@@ -1157,8 +1145,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
61659 {
61660 return 0;
61661 }
61662+
61663+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
61664+ unsigned long address)
61665+{
61666+ return 0;
61667+}
61668 #else
61669 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
61670+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
61671 #endif
61672
61673 #ifdef __PAGETABLE_PMD_FOLDED
61674@@ -1167,8 +1162,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
61675 {
61676 return 0;
61677 }
61678+
61679+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
61680+ unsigned long address)
61681+{
61682+ return 0;
61683+}
61684 #else
61685 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
61686+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
61687 #endif
61688
61689 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
61690@@ -1186,11 +1188,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
61691 NULL: pud_offset(pgd, address);
61692 }
61693
61694+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
61695+{
61696+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
61697+ NULL: pud_offset(pgd, address);
61698+}
61699+
61700 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
61701 {
61702 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
61703 NULL: pmd_offset(pud, address);
61704 }
61705+
61706+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
61707+{
61708+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
61709+ NULL: pmd_offset(pud, address);
61710+}
61711 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
61712
61713 #if USE_SPLIT_PTLOCKS
61714@@ -1400,6 +1414,7 @@ extern unsigned long do_mmap(struct file *, unsigned long,
61715 unsigned long, unsigned long,
61716 unsigned long, unsigned long);
61717 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
61718+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
61719
61720 /* These take the mm semaphore themselves */
61721 extern unsigned long vm_brk(unsigned long, unsigned long);
61722@@ -1462,6 +1477,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
61723 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
61724 struct vm_area_struct **pprev);
61725
61726+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
61727+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
61728+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
61729+
61730 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
61731 NULL if none. Assume start_addr < end_addr. */
61732 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
61733@@ -1490,15 +1509,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
61734 return vma;
61735 }
61736
61737-#ifdef CONFIG_MMU
61738-pgprot_t vm_get_page_prot(unsigned long vm_flags);
61739-#else
61740-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
61741-{
61742- return __pgprot(0);
61743-}
61744-#endif
61745-
61746 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
61747 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
61748 unsigned long pfn, unsigned long size, pgprot_t);
61749@@ -1602,7 +1612,7 @@ extern int unpoison_memory(unsigned long pfn);
61750 extern int sysctl_memory_failure_early_kill;
61751 extern int sysctl_memory_failure_recovery;
61752 extern void shake_page(struct page *p, int access);
61753-extern atomic_long_t mce_bad_pages;
61754+extern atomic_long_unchecked_t mce_bad_pages;
61755 extern int soft_offline_page(struct page *page, int flags);
61756
61757 extern void dump_page(struct page *page);
61758@@ -1633,5 +1643,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
61759 static inline bool page_is_guard(struct page *page) { return false; }
61760 #endif /* CONFIG_DEBUG_PAGEALLOC */
61761
61762+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
61763+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
61764+#else
61765+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
61766+#endif
61767+
61768 #endif /* __KERNEL__ */
61769 #endif /* _LINUX_MM_H */
61770diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
61771index 3cc3062..efeaeb7 100644
61772--- a/include/linux/mm_types.h
61773+++ b/include/linux/mm_types.h
61774@@ -252,6 +252,8 @@ struct vm_area_struct {
61775 #ifdef CONFIG_NUMA
61776 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
61777 #endif
61778+
61779+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
61780 };
61781
61782 struct core_thread {
61783@@ -326,7 +328,7 @@ struct mm_struct {
61784 unsigned long def_flags;
61785 unsigned long nr_ptes; /* Page table pages */
61786 unsigned long start_code, end_code, start_data, end_data;
61787- unsigned long start_brk, brk, start_stack;
61788+ unsigned long brk_gap, start_brk, brk, start_stack;
61789 unsigned long arg_start, arg_end, env_start, env_end;
61790
61791 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
61792@@ -388,6 +390,24 @@ struct mm_struct {
61793 #ifdef CONFIG_CPUMASK_OFFSTACK
61794 struct cpumask cpumask_allocation;
61795 #endif
61796+
61797+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS) || defined(CONFIG_PAX_HAVE_ACL_FLAGS) || defined(CONFIG_PAX_HOOK_ACL_FLAGS)
61798+ unsigned long pax_flags;
61799+#endif
61800+
61801+#ifdef CONFIG_PAX_DLRESOLVE
61802+ unsigned long call_dl_resolve;
61803+#endif
61804+
61805+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
61806+ unsigned long call_syscall;
61807+#endif
61808+
61809+#ifdef CONFIG_PAX_ASLR
61810+ unsigned long delta_mmap; /* randomized offset */
61811+ unsigned long delta_stack; /* randomized offset */
61812+#endif
61813+
61814 };
61815
61816 static inline void mm_init_cpumask(struct mm_struct *mm)
61817diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
61818index 1d1b1e1..2a13c78 100644
61819--- a/include/linux/mmu_notifier.h
61820+++ b/include/linux/mmu_notifier.h
61821@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
61822 */
61823 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
61824 ({ \
61825- pte_t __pte; \
61826+ pte_t ___pte; \
61827 struct vm_area_struct *___vma = __vma; \
61828 unsigned long ___address = __address; \
61829- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
61830+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
61831 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
61832- __pte; \
61833+ ___pte; \
61834 })
61835
61836 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
61837diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
61838index dff7115..0e001c8 100644
61839--- a/include/linux/mmzone.h
61840+++ b/include/linux/mmzone.h
61841@@ -380,7 +380,7 @@ struct zone {
61842 unsigned long flags; /* zone flags, see below */
61843
61844 /* Zone statistics */
61845- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61846+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61847
61848 /*
61849 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
61850diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
61851index 501da4c..ba79bb4 100644
61852--- a/include/linux/mod_devicetable.h
61853+++ b/include/linux/mod_devicetable.h
61854@@ -12,7 +12,7 @@
61855 typedef unsigned long kernel_ulong_t;
61856 #endif
61857
61858-#define PCI_ANY_ID (~0)
61859+#define PCI_ANY_ID ((__u16)~0)
61860
61861 struct pci_device_id {
61862 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
61863@@ -131,7 +131,7 @@ struct usb_device_id {
61864 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
61865 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
61866
61867-#define HID_ANY_ID (~0)
61868+#define HID_ANY_ID (~0U)
61869
61870 struct hid_device_id {
61871 __u16 bus;
61872diff --git a/include/linux/module.h b/include/linux/module.h
61873index fbcafe2..e5d9587 100644
61874--- a/include/linux/module.h
61875+++ b/include/linux/module.h
61876@@ -17,6 +17,7 @@
61877 #include <linux/moduleparam.h>
61878 #include <linux/tracepoint.h>
61879 #include <linux/export.h>
61880+#include <linux/fs.h>
61881
61882 #include <linux/percpu.h>
61883 #include <asm/module.h>
61884@@ -273,19 +274,16 @@ struct module
61885 int (*init)(void);
61886
61887 /* If this is non-NULL, vfree after init() returns */
61888- void *module_init;
61889+ void *module_init_rx, *module_init_rw;
61890
61891 /* Here is the actual code + data, vfree'd on unload. */
61892- void *module_core;
61893+ void *module_core_rx, *module_core_rw;
61894
61895 /* Here are the sizes of the init and core sections */
61896- unsigned int init_size, core_size;
61897+ unsigned int init_size_rw, core_size_rw;
61898
61899 /* The size of the executable code in each section. */
61900- unsigned int init_text_size, core_text_size;
61901-
61902- /* Size of RO sections of the module (text+rodata) */
61903- unsigned int init_ro_size, core_ro_size;
61904+ unsigned int init_size_rx, core_size_rx;
61905
61906 /* Arch-specific module values */
61907 struct mod_arch_specific arch;
61908@@ -341,6 +339,10 @@ struct module
61909 #ifdef CONFIG_EVENT_TRACING
61910 struct ftrace_event_call **trace_events;
61911 unsigned int num_trace_events;
61912+ struct file_operations trace_id;
61913+ struct file_operations trace_enable;
61914+ struct file_operations trace_format;
61915+ struct file_operations trace_filter;
61916 #endif
61917 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
61918 unsigned int num_ftrace_callsites;
61919@@ -388,16 +390,46 @@ bool is_module_address(unsigned long addr);
61920 bool is_module_percpu_address(unsigned long addr);
61921 bool is_module_text_address(unsigned long addr);
61922
61923+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
61924+{
61925+
61926+#ifdef CONFIG_PAX_KERNEXEC
61927+ if (ktla_ktva(addr) >= (unsigned long)start &&
61928+ ktla_ktva(addr) < (unsigned long)start + size)
61929+ return 1;
61930+#endif
61931+
61932+ return ((void *)addr >= start && (void *)addr < start + size);
61933+}
61934+
61935+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
61936+{
61937+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
61938+}
61939+
61940+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
61941+{
61942+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
61943+}
61944+
61945+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
61946+{
61947+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
61948+}
61949+
61950+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
61951+{
61952+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
61953+}
61954+
61955 static inline int within_module_core(unsigned long addr, struct module *mod)
61956 {
61957- return (unsigned long)mod->module_core <= addr &&
61958- addr < (unsigned long)mod->module_core + mod->core_size;
61959+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
61960 }
61961
61962 static inline int within_module_init(unsigned long addr, struct module *mod)
61963 {
61964- return (unsigned long)mod->module_init <= addr &&
61965- addr < (unsigned long)mod->module_init + mod->init_size;
61966+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
61967 }
61968
61969 /* Search for module by name: must hold module_mutex. */
61970diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
61971index b2be02e..72d2f78 100644
61972--- a/include/linux/moduleloader.h
61973+++ b/include/linux/moduleloader.h
61974@@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
61975
61976 /* Allocator used for allocating struct module, core sections and init
61977 sections. Returns NULL on failure. */
61978-void *module_alloc(unsigned long size);
61979+void *module_alloc(unsigned long size) __size_overflow(1);
61980+
61981+#ifdef CONFIG_PAX_KERNEXEC
61982+void *module_alloc_exec(unsigned long size) __size_overflow(1);
61983+#else
61984+#define module_alloc_exec(x) module_alloc(x)
61985+#endif
61986
61987 /* Free memory returned from module_alloc. */
61988 void module_free(struct module *mod, void *module_region);
61989
61990+#ifdef CONFIG_PAX_KERNEXEC
61991+void module_free_exec(struct module *mod, void *module_region);
61992+#else
61993+#define module_free_exec(x, y) module_free((x), (y))
61994+#endif
61995+
61996 /* Apply the given relocation to the (simplified) ELF. Return -error
61997 or 0. */
61998 int apply_relocate(Elf_Shdr *sechdrs,
61999diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
62000index 944bc18..042d291 100644
62001--- a/include/linux/moduleparam.h
62002+++ b/include/linux/moduleparam.h
62003@@ -286,7 +286,7 @@ static inline void __kernel_param_unlock(void)
62004 * @len is usually just sizeof(string).
62005 */
62006 #define module_param_string(name, string, len, perm) \
62007- static const struct kparam_string __param_string_##name \
62008+ static const struct kparam_string __param_string_##name __used \
62009 = { len, string }; \
62010 __module_param_call(MODULE_PARAM_PREFIX, name, \
62011 &param_ops_string, \
62012@@ -424,7 +424,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
62013 */
62014 #define module_param_array_named(name, array, type, nump, perm) \
62015 param_check_##type(name, &(array)[0]); \
62016- static const struct kparam_array __param_arr_##name \
62017+ static const struct kparam_array __param_arr_##name __used \
62018 = { .max = ARRAY_SIZE(array), .num = nump, \
62019 .ops = &param_ops_##type, \
62020 .elemsize = sizeof(array[0]), .elem = array }; \
62021diff --git a/include/linux/namei.h b/include/linux/namei.h
62022index ffc0213..2c1f2cb 100644
62023--- a/include/linux/namei.h
62024+++ b/include/linux/namei.h
62025@@ -24,7 +24,7 @@ struct nameidata {
62026 unsigned seq;
62027 int last_type;
62028 unsigned depth;
62029- char *saved_names[MAX_NESTED_LINKS + 1];
62030+ const char *saved_names[MAX_NESTED_LINKS + 1];
62031
62032 /* Intent data */
62033 union {
62034@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
62035 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
62036 extern void unlock_rename(struct dentry *, struct dentry *);
62037
62038-static inline void nd_set_link(struct nameidata *nd, char *path)
62039+static inline void nd_set_link(struct nameidata *nd, const char *path)
62040 {
62041 nd->saved_names[nd->depth] = path;
62042 }
62043
62044-static inline char *nd_get_link(struct nameidata *nd)
62045+static inline const char *nd_get_link(const struct nameidata *nd)
62046 {
62047 return nd->saved_names[nd->depth];
62048 }
62049diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
62050index 33900a5..2072000 100644
62051--- a/include/linux/netdevice.h
62052+++ b/include/linux/netdevice.h
62053@@ -1003,6 +1003,7 @@ struct net_device_ops {
62054 int (*ndo_neigh_construct)(struct neighbour *n);
62055 void (*ndo_neigh_destroy)(struct neighbour *n);
62056 };
62057+typedef struct net_device_ops __no_const net_device_ops_no_const;
62058
62059 /*
62060 * The DEVICE structure.
62061@@ -1064,7 +1065,7 @@ struct net_device {
62062 int iflink;
62063
62064 struct net_device_stats stats;
62065- atomic_long_t rx_dropped; /* dropped packets by core network
62066+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
62067 * Do not use this in drivers.
62068 */
62069
62070diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
62071new file mode 100644
62072index 0000000..33f4af8
62073--- /dev/null
62074+++ b/include/linux/netfilter/xt_gradm.h
62075@@ -0,0 +1,9 @@
62076+#ifndef _LINUX_NETFILTER_XT_GRADM_H
62077+#define _LINUX_NETFILTER_XT_GRADM_H 1
62078+
62079+struct xt_gradm_mtinfo {
62080+ __u16 flags;
62081+ __u16 invflags;
62082+};
62083+
62084+#endif
62085diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
62086index c65a18a..0c05f3a 100644
62087--- a/include/linux/of_pdt.h
62088+++ b/include/linux/of_pdt.h
62089@@ -32,7 +32,7 @@ struct of_pdt_ops {
62090
62091 /* return 0 on success; fill in 'len' with number of bytes in path */
62092 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
62093-};
62094+} __no_const;
62095
62096 extern void *prom_early_alloc(unsigned long size);
62097
62098diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
62099index a4c5624..79d6d88 100644
62100--- a/include/linux/oprofile.h
62101+++ b/include/linux/oprofile.h
62102@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
62103 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
62104 char const * name, ulong * val);
62105
62106-/** Create a file for read-only access to an atomic_t. */
62107+/** Create a file for read-only access to an atomic_unchecked_t. */
62108 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
62109- char const * name, atomic_t * val);
62110+ char const * name, atomic_unchecked_t * val);
62111
62112 /** create a directory */
62113 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
62114diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
62115index ddbb6a9..be1680e 100644
62116--- a/include/linux/perf_event.h
62117+++ b/include/linux/perf_event.h
62118@@ -879,8 +879,8 @@ struct perf_event {
62119
62120 enum perf_event_active_state state;
62121 unsigned int attach_state;
62122- local64_t count;
62123- atomic64_t child_count;
62124+ local64_t count; /* PaX: fix it one day */
62125+ atomic64_unchecked_t child_count;
62126
62127 /*
62128 * These are the total time in nanoseconds that the event
62129@@ -931,8 +931,8 @@ struct perf_event {
62130 * These accumulate total time (in nanoseconds) that children
62131 * events have been enabled and running, respectively.
62132 */
62133- atomic64_t child_total_time_enabled;
62134- atomic64_t child_total_time_running;
62135+ atomic64_unchecked_t child_total_time_enabled;
62136+ atomic64_unchecked_t child_total_time_running;
62137
62138 /*
62139 * Protect attach/detach and child_list:
62140diff --git a/include/linux/personality.h b/include/linux/personality.h
62141index 8fc7dd1a..c19d89e 100644
62142--- a/include/linux/personality.h
62143+++ b/include/linux/personality.h
62144@@ -44,6 +44,7 @@ enum {
62145 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
62146 ADDR_NO_RANDOMIZE | \
62147 ADDR_COMPAT_LAYOUT | \
62148+ ADDR_LIMIT_3GB | \
62149 MMAP_PAGE_ZERO)
62150
62151 /*
62152diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
62153index e1ac1ce..0675fed 100644
62154--- a/include/linux/pipe_fs_i.h
62155+++ b/include/linux/pipe_fs_i.h
62156@@ -45,9 +45,9 @@ struct pipe_buffer {
62157 struct pipe_inode_info {
62158 wait_queue_head_t wait;
62159 unsigned int nrbufs, curbuf, buffers;
62160- unsigned int readers;
62161- unsigned int writers;
62162- unsigned int waiting_writers;
62163+ atomic_t readers;
62164+ atomic_t writers;
62165+ atomic_t waiting_writers;
62166 unsigned int r_counter;
62167 unsigned int w_counter;
62168 struct page *tmp_page;
62169diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
62170index 609daae..5392427 100644
62171--- a/include/linux/pm_runtime.h
62172+++ b/include/linux/pm_runtime.h
62173@@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
62174
62175 static inline void pm_runtime_mark_last_busy(struct device *dev)
62176 {
62177- ACCESS_ONCE(dev->power.last_busy) = jiffies;
62178+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
62179 }
62180
62181 #else /* !CONFIG_PM_RUNTIME */
62182diff --git a/include/linux/poison.h b/include/linux/poison.h
62183index 2110a81..13a11bb 100644
62184--- a/include/linux/poison.h
62185+++ b/include/linux/poison.h
62186@@ -19,8 +19,8 @@
62187 * under normal circumstances, used to verify that nobody uses
62188 * non-initialized list entries.
62189 */
62190-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
62191-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
62192+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
62193+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
62194
62195 /********** include/linux/timer.h **********/
62196 /*
62197diff --git a/include/linux/preempt.h b/include/linux/preempt.h
62198index 5a710b9..0b0dab9 100644
62199--- a/include/linux/preempt.h
62200+++ b/include/linux/preempt.h
62201@@ -126,7 +126,7 @@ struct preempt_ops {
62202 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
62203 void (*sched_out)(struct preempt_notifier *notifier,
62204 struct task_struct *next);
62205-};
62206+} __no_const;
62207
62208 /**
62209 * preempt_notifier - key for installing preemption notifiers
62210diff --git a/include/linux/printk.h b/include/linux/printk.h
62211index 0525927..a5388b6 100644
62212--- a/include/linux/printk.h
62213+++ b/include/linux/printk.h
62214@@ -94,6 +94,8 @@ void early_printk(const char *fmt, ...);
62215 extern int printk_needs_cpu(int cpu);
62216 extern void printk_tick(void);
62217
62218+extern int kptr_restrict;
62219+
62220 #ifdef CONFIG_PRINTK
62221 asmlinkage __printf(1, 0)
62222 int vprintk(const char *fmt, va_list args);
62223@@ -117,7 +119,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
62224
62225 extern int printk_delay_msec;
62226 extern int dmesg_restrict;
62227-extern int kptr_restrict;
62228
62229 void log_buf_kexec_setup(void);
62230 void __init setup_log_buf(int early);
62231diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
62232index 85c5073..51fac8b 100644
62233--- a/include/linux/proc_fs.h
62234+++ b/include/linux/proc_fs.h
62235@@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
62236 return proc_create_data(name, mode, parent, proc_fops, NULL);
62237 }
62238
62239+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
62240+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
62241+{
62242+#ifdef CONFIG_GRKERNSEC_PROC_USER
62243+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
62244+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62245+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
62246+#else
62247+ return proc_create_data(name, mode, parent, proc_fops, NULL);
62248+#endif
62249+}
62250+
62251 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
62252 umode_t mode, struct proc_dir_entry *base,
62253 read_proc_t *read_proc, void * data)
62254@@ -258,7 +270,7 @@ union proc_op {
62255 int (*proc_show)(struct seq_file *m,
62256 struct pid_namespace *ns, struct pid *pid,
62257 struct task_struct *task);
62258-};
62259+} __no_const;
62260
62261 struct ctl_table_header;
62262 struct ctl_table;
62263diff --git a/include/linux/random.h b/include/linux/random.h
62264index 8f74538..02a1012 100644
62265--- a/include/linux/random.h
62266+++ b/include/linux/random.h
62267@@ -69,12 +69,17 @@ void srandom32(u32 seed);
62268
62269 u32 prandom32(struct rnd_state *);
62270
62271+static inline unsigned long pax_get_random_long(void)
62272+{
62273+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
62274+}
62275+
62276 /*
62277 * Handle minimum values for seeds
62278 */
62279 static inline u32 __seed(u32 x, u32 m)
62280 {
62281- return (x < m) ? x + m : x;
62282+ return (x <= m) ? x + m + 1 : x;
62283 }
62284
62285 /**
62286diff --git a/include/linux/reboot.h b/include/linux/reboot.h
62287index e0879a7..a12f962 100644
62288--- a/include/linux/reboot.h
62289+++ b/include/linux/reboot.h
62290@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
62291 * Architecture-specific implementations of sys_reboot commands.
62292 */
62293
62294-extern void machine_restart(char *cmd);
62295-extern void machine_halt(void);
62296-extern void machine_power_off(void);
62297+extern void machine_restart(char *cmd) __noreturn;
62298+extern void machine_halt(void) __noreturn;
62299+extern void machine_power_off(void) __noreturn;
62300
62301 extern void machine_shutdown(void);
62302 struct pt_regs;
62303@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
62304 */
62305
62306 extern void kernel_restart_prepare(char *cmd);
62307-extern void kernel_restart(char *cmd);
62308-extern void kernel_halt(void);
62309-extern void kernel_power_off(void);
62310+extern void kernel_restart(char *cmd) __noreturn;
62311+extern void kernel_halt(void) __noreturn;
62312+extern void kernel_power_off(void) __noreturn;
62313
62314 extern int C_A_D; /* for sysctl */
62315 void ctrl_alt_del(void);
62316@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
62317 * Emergency restart, callable from an interrupt handler.
62318 */
62319
62320-extern void emergency_restart(void);
62321+extern void emergency_restart(void) __noreturn;
62322 #include <asm/emergency-restart.h>
62323
62324 #endif
62325diff --git a/include/linux/relay.h b/include/linux/relay.h
62326index 91cacc3..b55ff74 100644
62327--- a/include/linux/relay.h
62328+++ b/include/linux/relay.h
62329@@ -160,7 +160,7 @@ struct rchan_callbacks
62330 * The callback should return 0 if successful, negative if not.
62331 */
62332 int (*remove_buf_file)(struct dentry *dentry);
62333-};
62334+} __no_const;
62335
62336 /*
62337 * CONFIG_RELAY kernel API, kernel/relay.c
62338diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
62339index 6fdf027..ff72610 100644
62340--- a/include/linux/rfkill.h
62341+++ b/include/linux/rfkill.h
62342@@ -147,6 +147,7 @@ struct rfkill_ops {
62343 void (*query)(struct rfkill *rfkill, void *data);
62344 int (*set_block)(void *data, bool blocked);
62345 };
62346+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
62347
62348 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
62349 /**
62350diff --git a/include/linux/rio.h b/include/linux/rio.h
62351index 4d50611..c6858a2 100644
62352--- a/include/linux/rio.h
62353+++ b/include/linux/rio.h
62354@@ -315,7 +315,7 @@ struct rio_ops {
62355 int mbox, void *buffer, size_t len);
62356 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
62357 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
62358-};
62359+} __no_const;
62360
62361 #define RIO_RESOURCE_MEM 0x00000100
62362 #define RIO_RESOURCE_DOORBELL 0x00000200
62363diff --git a/include/linux/rmap.h b/include/linux/rmap.h
62364index fd07c45..4676b8e 100644
62365--- a/include/linux/rmap.h
62366+++ b/include/linux/rmap.h
62367@@ -119,9 +119,9 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
62368 void anon_vma_init(void); /* create anon_vma_cachep */
62369 int anon_vma_prepare(struct vm_area_struct *);
62370 void unlink_anon_vmas(struct vm_area_struct *);
62371-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
62372+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
62373 void anon_vma_moveto_tail(struct vm_area_struct *);
62374-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
62375+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
62376
62377 static inline void anon_vma_merge(struct vm_area_struct *vma,
62378 struct vm_area_struct *next)
62379diff --git a/include/linux/sched.h b/include/linux/sched.h
62380index 81a173c..85ccd8f 100644
62381--- a/include/linux/sched.h
62382+++ b/include/linux/sched.h
62383@@ -100,6 +100,7 @@ struct bio_list;
62384 struct fs_struct;
62385 struct perf_event_context;
62386 struct blk_plug;
62387+struct linux_binprm;
62388
62389 /*
62390 * List of flags we want to share for kernel threads,
62391@@ -382,10 +383,13 @@ struct user_namespace;
62392 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
62393
62394 extern int sysctl_max_map_count;
62395+extern unsigned long sysctl_heap_stack_gap;
62396
62397 #include <linux/aio.h>
62398
62399 #ifdef CONFIG_MMU
62400+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
62401+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
62402 extern void arch_pick_mmap_layout(struct mm_struct *mm);
62403 extern unsigned long
62404 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
62405@@ -643,6 +647,17 @@ struct signal_struct {
62406 #ifdef CONFIG_TASKSTATS
62407 struct taskstats *stats;
62408 #endif
62409+
62410+#ifdef CONFIG_GRKERNSEC
62411+ u32 curr_ip;
62412+ u32 saved_ip;
62413+ u32 gr_saddr;
62414+ u32 gr_daddr;
62415+ u16 gr_sport;
62416+ u16 gr_dport;
62417+ u8 used_accept:1;
62418+#endif
62419+
62420 #ifdef CONFIG_AUDIT
62421 unsigned audit_tty;
62422 struct tty_audit_buf *tty_audit_buf;
62423@@ -726,6 +741,11 @@ struct user_struct {
62424 struct key *session_keyring; /* UID's default session keyring */
62425 #endif
62426
62427+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
62428+ unsigned int banned;
62429+ unsigned long ban_expires;
62430+#endif
62431+
62432 /* Hash table maintenance information */
62433 struct hlist_node uidhash_node;
62434 uid_t uid;
62435@@ -1386,8 +1406,8 @@ struct task_struct {
62436 struct list_head thread_group;
62437
62438 struct completion *vfork_done; /* for vfork() */
62439- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
62440- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
62441+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
62442+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
62443
62444 cputime_t utime, stime, utimescaled, stimescaled;
62445 cputime_t gtime;
62446@@ -1403,13 +1423,6 @@ struct task_struct {
62447 struct task_cputime cputime_expires;
62448 struct list_head cpu_timers[3];
62449
62450-/* process credentials */
62451- const struct cred __rcu *real_cred; /* objective and real subjective task
62452- * credentials (COW) */
62453- const struct cred __rcu *cred; /* effective (overridable) subjective task
62454- * credentials (COW) */
62455- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62456-
62457 char comm[TASK_COMM_LEN]; /* executable name excluding path
62458 - access with [gs]et_task_comm (which lock
62459 it with task_lock())
62460@@ -1426,8 +1439,16 @@ struct task_struct {
62461 #endif
62462 /* CPU-specific state of this task */
62463 struct thread_struct thread;
62464+/* thread_info moved to task_struct */
62465+#ifdef CONFIG_X86
62466+ struct thread_info tinfo;
62467+#endif
62468 /* filesystem information */
62469 struct fs_struct *fs;
62470+
62471+ const struct cred __rcu *cred; /* effective (overridable) subjective task
62472+ * credentials (COW) */
62473+
62474 /* open file information */
62475 struct files_struct *files;
62476 /* namespaces */
62477@@ -1469,6 +1490,11 @@ struct task_struct {
62478 struct rt_mutex_waiter *pi_blocked_on;
62479 #endif
62480
62481+/* process credentials */
62482+ const struct cred __rcu *real_cred; /* objective and real subjective task
62483+ * credentials (COW) */
62484+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62485+
62486 #ifdef CONFIG_DEBUG_MUTEXES
62487 /* mutex deadlock detection */
62488 struct mutex_waiter *blocked_on;
62489@@ -1585,6 +1611,27 @@ struct task_struct {
62490 unsigned long default_timer_slack_ns;
62491
62492 struct list_head *scm_work_list;
62493+
62494+#ifdef CONFIG_GRKERNSEC
62495+ /* grsecurity */
62496+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62497+ u64 exec_id;
62498+#endif
62499+#ifdef CONFIG_GRKERNSEC_SETXID
62500+ const struct cred *delayed_cred;
62501+#endif
62502+ struct dentry *gr_chroot_dentry;
62503+ struct acl_subject_label *acl;
62504+ struct acl_role_label *role;
62505+ struct file *exec_file;
62506+ u16 acl_role_id;
62507+ /* is this the task that authenticated to the special role */
62508+ u8 acl_sp_role;
62509+ u8 is_writable;
62510+ u8 brute;
62511+ u8 gr_is_chrooted;
62512+#endif
62513+
62514 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
62515 /* Index of current stored address in ret_stack */
62516 int curr_ret_stack;
62517@@ -1619,6 +1666,51 @@ struct task_struct {
62518 #endif
62519 };
62520
62521+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
62522+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
62523+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
62524+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
62525+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
62526+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
62527+
62528+#ifdef CONFIG_PAX_SOFTMODE
62529+extern int pax_softmode;
62530+#endif
62531+
62532+extern int pax_check_flags(unsigned long *);
62533+
62534+/* if tsk != current then task_lock must be held on it */
62535+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62536+static inline unsigned long pax_get_flags(struct task_struct *tsk)
62537+{
62538+ if (likely(tsk->mm))
62539+ return tsk->mm->pax_flags;
62540+ else
62541+ return 0UL;
62542+}
62543+
62544+/* if tsk != current then task_lock must be held on it */
62545+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
62546+{
62547+ if (likely(tsk->mm)) {
62548+ tsk->mm->pax_flags = flags;
62549+ return 0;
62550+ }
62551+ return -EINVAL;
62552+}
62553+#endif
62554+
62555+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62556+extern void pax_set_initial_flags(struct linux_binprm *bprm);
62557+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
62558+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
62559+#endif
62560+
62561+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
62562+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
62563+extern void pax_report_refcount_overflow(struct pt_regs *regs);
62564+extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
62565+
62566 /* Future-safe accessor for struct task_struct's cpus_allowed. */
62567 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
62568
62569@@ -2138,7 +2230,9 @@ void yield(void);
62570 extern struct exec_domain default_exec_domain;
62571
62572 union thread_union {
62573+#ifndef CONFIG_X86
62574 struct thread_info thread_info;
62575+#endif
62576 unsigned long stack[THREAD_SIZE/sizeof(long)];
62577 };
62578
62579@@ -2171,6 +2265,7 @@ extern struct pid_namespace init_pid_ns;
62580 */
62581
62582 extern struct task_struct *find_task_by_vpid(pid_t nr);
62583+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
62584 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
62585 struct pid_namespace *ns);
62586
62587@@ -2314,7 +2409,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
62588 extern void exit_itimers(struct signal_struct *);
62589 extern void flush_itimer_signals(void);
62590
62591-extern void do_group_exit(int);
62592+extern __noreturn void do_group_exit(int);
62593
62594 extern void daemonize(const char *, ...);
62595 extern int allow_signal(int);
62596@@ -2515,13 +2610,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
62597
62598 #endif
62599
62600-static inline int object_is_on_stack(void *obj)
62601+static inline int object_starts_on_stack(void *obj)
62602 {
62603- void *stack = task_stack_page(current);
62604+ const void *stack = task_stack_page(current);
62605
62606 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
62607 }
62608
62609+#ifdef CONFIG_PAX_USERCOPY
62610+extern int object_is_on_stack(const void *obj, unsigned long len);
62611+#endif
62612+
62613 extern void thread_info_cache_init(void);
62614
62615 #ifdef CONFIG_DEBUG_STACK_USAGE
62616diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
62617index 899fbb4..1cb4138 100644
62618--- a/include/linux/screen_info.h
62619+++ b/include/linux/screen_info.h
62620@@ -43,7 +43,8 @@ struct screen_info {
62621 __u16 pages; /* 0x32 */
62622 __u16 vesa_attributes; /* 0x34 */
62623 __u32 capabilities; /* 0x36 */
62624- __u8 _reserved[6]; /* 0x3a */
62625+ __u16 vesapm_size; /* 0x3a */
62626+ __u8 _reserved[4]; /* 0x3c */
62627 } __attribute__((packed));
62628
62629 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
62630diff --git a/include/linux/security.h b/include/linux/security.h
62631index 673afbb..2b7454b 100644
62632--- a/include/linux/security.h
62633+++ b/include/linux/security.h
62634@@ -26,6 +26,7 @@
62635 #include <linux/capability.h>
62636 #include <linux/slab.h>
62637 #include <linux/err.h>
62638+#include <linux/grsecurity.h>
62639
62640 struct linux_binprm;
62641 struct cred;
62642diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
62643index fc61854..d7c490b 100644
62644--- a/include/linux/seq_file.h
62645+++ b/include/linux/seq_file.h
62646@@ -25,6 +25,9 @@ struct seq_file {
62647 struct mutex lock;
62648 const struct seq_operations *op;
62649 int poll_event;
62650+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62651+ u64 exec_id;
62652+#endif
62653 void *private;
62654 };
62655
62656@@ -34,6 +37,7 @@ struct seq_operations {
62657 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
62658 int (*show) (struct seq_file *m, void *v);
62659 };
62660+typedef struct seq_operations __no_const seq_operations_no_const;
62661
62662 #define SEQ_SKIP 1
62663
62664diff --git a/include/linux/shm.h b/include/linux/shm.h
62665index 92808b8..c28cac4 100644
62666--- a/include/linux/shm.h
62667+++ b/include/linux/shm.h
62668@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
62669
62670 /* The task created the shm object. NULL if the task is dead. */
62671 struct task_struct *shm_creator;
62672+#ifdef CONFIG_GRKERNSEC
62673+ time_t shm_createtime;
62674+ pid_t shm_lapid;
62675+#endif
62676 };
62677
62678 /* shm_mode upper byte flags */
62679diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
62680index c168907..c7756db 100644
62681--- a/include/linux/skbuff.h
62682+++ b/include/linux/skbuff.h
62683@@ -666,7 +666,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
62684 */
62685 static inline int skb_queue_empty(const struct sk_buff_head *list)
62686 {
62687- return list->next == (struct sk_buff *)list;
62688+ return list->next == (const struct sk_buff *)list;
62689 }
62690
62691 /**
62692@@ -679,7 +679,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
62693 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
62694 const struct sk_buff *skb)
62695 {
62696- return skb->next == (struct sk_buff *)list;
62697+ return skb->next == (const struct sk_buff *)list;
62698 }
62699
62700 /**
62701@@ -692,7 +692,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
62702 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
62703 const struct sk_buff *skb)
62704 {
62705- return skb->prev == (struct sk_buff *)list;
62706+ return skb->prev == (const struct sk_buff *)list;
62707 }
62708
62709 /**
62710@@ -1587,7 +1587,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
62711 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
62712 */
62713 #ifndef NET_SKB_PAD
62714-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
62715+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
62716 #endif
62717
62718 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
62719diff --git a/include/linux/slab.h b/include/linux/slab.h
62720index a595dce..c403597 100644
62721--- a/include/linux/slab.h
62722+++ b/include/linux/slab.h
62723@@ -11,12 +11,20 @@
62724
62725 #include <linux/gfp.h>
62726 #include <linux/types.h>
62727+#include <linux/err.h>
62728
62729 /*
62730 * Flags to pass to kmem_cache_create().
62731 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
62732 */
62733 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
62734+
62735+#ifdef CONFIG_PAX_USERCOPY
62736+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
62737+#else
62738+#define SLAB_USERCOPY 0x00000000UL
62739+#endif
62740+
62741 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
62742 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
62743 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
62744@@ -87,10 +95,13 @@
62745 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
62746 * Both make kfree a no-op.
62747 */
62748-#define ZERO_SIZE_PTR ((void *)16)
62749+#define ZERO_SIZE_PTR \
62750+({ \
62751+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
62752+ (void *)(-MAX_ERRNO-1L); \
62753+})
62754
62755-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
62756- (unsigned long)ZERO_SIZE_PTR)
62757+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
62758
62759 /*
62760 * struct kmem_cache related prototypes
62761@@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
62762 void kfree(const void *);
62763 void kzfree(const void *);
62764 size_t ksize(const void *);
62765+void check_object_size(const void *ptr, unsigned long n, bool to);
62766
62767 /*
62768 * Allocator specific definitions. These are mainly used to establish optimized
62769@@ -240,6 +252,7 @@ size_t ksize(const void *);
62770 * for general use, and so are not documented here. For a full list of
62771 * potential flags, always refer to linux/gfp.h.
62772 */
62773+static void *kmalloc_array(size_t n, size_t size, gfp_t flags) __size_overflow(1, 2);
62774 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
62775 {
62776 if (size != 0 && n > ULONG_MAX / size)
62777@@ -298,7 +311,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
62778 */
62779 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
62780 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
62781-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
62782+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
62783 #define kmalloc_track_caller(size, flags) \
62784 __kmalloc_track_caller(size, flags, _RET_IP_)
62785 #else
62786@@ -317,7 +330,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
62787 */
62788 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
62789 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
62790-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
62791+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
62792 #define kmalloc_node_track_caller(size, flags, node) \
62793 __kmalloc_node_track_caller(size, flags, node, \
62794 _RET_IP_)
62795diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
62796index fbd1117..d4d8ef8 100644
62797--- a/include/linux/slab_def.h
62798+++ b/include/linux/slab_def.h
62799@@ -66,10 +66,10 @@ struct kmem_cache {
62800 unsigned long node_allocs;
62801 unsigned long node_frees;
62802 unsigned long node_overflow;
62803- atomic_t allochit;
62804- atomic_t allocmiss;
62805- atomic_t freehit;
62806- atomic_t freemiss;
62807+ atomic_unchecked_t allochit;
62808+ atomic_unchecked_t allocmiss;
62809+ atomic_unchecked_t freehit;
62810+ atomic_unchecked_t freemiss;
62811
62812 /*
62813 * If debugging is enabled, then the allocator can add additional
62814@@ -107,7 +107,7 @@ struct cache_sizes {
62815 extern struct cache_sizes malloc_sizes[];
62816
62817 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
62818-void *__kmalloc(size_t size, gfp_t flags);
62819+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
62820
62821 #ifdef CONFIG_TRACING
62822 extern void *kmem_cache_alloc_trace(size_t size,
62823@@ -160,7 +160,7 @@ found:
62824 }
62825
62826 #ifdef CONFIG_NUMA
62827-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
62828+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
62829 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
62830
62831 #ifdef CONFIG_TRACING
62832diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
62833index 0ec00b3..39cb7fc 100644
62834--- a/include/linux/slob_def.h
62835+++ b/include/linux/slob_def.h
62836@@ -9,7 +9,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
62837 return kmem_cache_alloc_node(cachep, flags, -1);
62838 }
62839
62840-void *__kmalloc_node(size_t size, gfp_t flags, int node);
62841+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
62842
62843 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
62844 {
62845@@ -29,6 +29,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
62846 return __kmalloc_node(size, flags, -1);
62847 }
62848
62849+static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
62850 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
62851 {
62852 return kmalloc(size, flags);
62853diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
62854index c2f8c8b..be9e036 100644
62855--- a/include/linux/slub_def.h
62856+++ b/include/linux/slub_def.h
62857@@ -92,7 +92,7 @@ struct kmem_cache {
62858 struct kmem_cache_order_objects max;
62859 struct kmem_cache_order_objects min;
62860 gfp_t allocflags; /* gfp flags to use on each alloc */
62861- int refcount; /* Refcount for slab cache destroy */
62862+ atomic_t refcount; /* Refcount for slab cache destroy */
62863 void (*ctor)(void *);
62864 int inuse; /* Offset to metadata */
62865 int align; /* Alignment */
62866@@ -153,6 +153,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
62867 * Sorry that the following has to be that ugly but some versions of GCC
62868 * have trouble with constant propagation and loops.
62869 */
62870+static __always_inline int kmalloc_index(size_t size) __size_overflow(1);
62871 static __always_inline int kmalloc_index(size_t size)
62872 {
62873 if (!size)
62874@@ -218,7 +219,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
62875 }
62876
62877 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
62878-void *__kmalloc(size_t size, gfp_t flags);
62879+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
62880
62881 static __always_inline void *
62882 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
62883@@ -259,6 +260,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
62884 }
62885 #endif
62886
62887+static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
62888 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
62889 {
62890 unsigned int order = get_order(size);
62891@@ -284,7 +286,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
62892 }
62893
62894 #ifdef CONFIG_NUMA
62895-void *__kmalloc_node(size_t size, gfp_t flags, int node);
62896+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
62897 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
62898
62899 #ifdef CONFIG_TRACING
62900diff --git a/include/linux/sonet.h b/include/linux/sonet.h
62901index de8832d..0147b46 100644
62902--- a/include/linux/sonet.h
62903+++ b/include/linux/sonet.h
62904@@ -61,7 +61,7 @@ struct sonet_stats {
62905 #include <linux/atomic.h>
62906
62907 struct k_sonet_stats {
62908-#define __HANDLE_ITEM(i) atomic_t i
62909+#define __HANDLE_ITEM(i) atomic_unchecked_t i
62910 __SONET_ITEMS
62911 #undef __HANDLE_ITEM
62912 };
62913diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
62914index 523547e..2cb7140 100644
62915--- a/include/linux/sunrpc/clnt.h
62916+++ b/include/linux/sunrpc/clnt.h
62917@@ -174,9 +174,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
62918 {
62919 switch (sap->sa_family) {
62920 case AF_INET:
62921- return ntohs(((struct sockaddr_in *)sap)->sin_port);
62922+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
62923 case AF_INET6:
62924- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
62925+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
62926 }
62927 return 0;
62928 }
62929@@ -209,7 +209,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
62930 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
62931 const struct sockaddr *src)
62932 {
62933- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
62934+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
62935 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
62936
62937 dsin->sin_family = ssin->sin_family;
62938@@ -312,7 +312,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
62939 if (sa->sa_family != AF_INET6)
62940 return 0;
62941
62942- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
62943+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
62944 }
62945
62946 #endif /* __KERNEL__ */
62947diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
62948index dc0c3cc..8503fb6 100644
62949--- a/include/linux/sunrpc/sched.h
62950+++ b/include/linux/sunrpc/sched.h
62951@@ -106,6 +106,7 @@ struct rpc_call_ops {
62952 void (*rpc_count_stats)(struct rpc_task *, void *);
62953 void (*rpc_release)(void *);
62954 };
62955+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
62956
62957 struct rpc_task_setup {
62958 struct rpc_task *task;
62959diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
62960index 0b8e3e6..33e0a01 100644
62961--- a/include/linux/sunrpc/svc_rdma.h
62962+++ b/include/linux/sunrpc/svc_rdma.h
62963@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
62964 extern unsigned int svcrdma_max_requests;
62965 extern unsigned int svcrdma_max_req_size;
62966
62967-extern atomic_t rdma_stat_recv;
62968-extern atomic_t rdma_stat_read;
62969-extern atomic_t rdma_stat_write;
62970-extern atomic_t rdma_stat_sq_starve;
62971-extern atomic_t rdma_stat_rq_starve;
62972-extern atomic_t rdma_stat_rq_poll;
62973-extern atomic_t rdma_stat_rq_prod;
62974-extern atomic_t rdma_stat_sq_poll;
62975-extern atomic_t rdma_stat_sq_prod;
62976+extern atomic_unchecked_t rdma_stat_recv;
62977+extern atomic_unchecked_t rdma_stat_read;
62978+extern atomic_unchecked_t rdma_stat_write;
62979+extern atomic_unchecked_t rdma_stat_sq_starve;
62980+extern atomic_unchecked_t rdma_stat_rq_starve;
62981+extern atomic_unchecked_t rdma_stat_rq_poll;
62982+extern atomic_unchecked_t rdma_stat_rq_prod;
62983+extern atomic_unchecked_t rdma_stat_sq_poll;
62984+extern atomic_unchecked_t rdma_stat_sq_prod;
62985
62986 #define RPCRDMA_VERSION 1
62987
62988diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
62989index c34b4c8..a65b67d 100644
62990--- a/include/linux/sysctl.h
62991+++ b/include/linux/sysctl.h
62992@@ -155,7 +155,11 @@ enum
62993 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
62994 };
62995
62996-
62997+#ifdef CONFIG_PAX_SOFTMODE
62998+enum {
62999+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
63000+};
63001+#endif
63002
63003 /* CTL_VM names: */
63004 enum
63005@@ -948,6 +952,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
63006
63007 extern int proc_dostring(struct ctl_table *, int,
63008 void __user *, size_t *, loff_t *);
63009+extern int proc_dostring_modpriv(struct ctl_table *, int,
63010+ void __user *, size_t *, loff_t *);
63011 extern int proc_dointvec(struct ctl_table *, int,
63012 void __user *, size_t *, loff_t *);
63013 extern int proc_dointvec_minmax(struct ctl_table *, int,
63014diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
63015index ff7dc08..893e1bd 100644
63016--- a/include/linux/tty_ldisc.h
63017+++ b/include/linux/tty_ldisc.h
63018@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
63019
63020 struct module *owner;
63021
63022- int refcount;
63023+ atomic_t refcount;
63024 };
63025
63026 struct tty_ldisc {
63027diff --git a/include/linux/types.h b/include/linux/types.h
63028index 7f480db..175c256 100644
63029--- a/include/linux/types.h
63030+++ b/include/linux/types.h
63031@@ -220,10 +220,26 @@ typedef struct {
63032 int counter;
63033 } atomic_t;
63034
63035+#ifdef CONFIG_PAX_REFCOUNT
63036+typedef struct {
63037+ int counter;
63038+} atomic_unchecked_t;
63039+#else
63040+typedef atomic_t atomic_unchecked_t;
63041+#endif
63042+
63043 #ifdef CONFIG_64BIT
63044 typedef struct {
63045 long counter;
63046 } atomic64_t;
63047+
63048+#ifdef CONFIG_PAX_REFCOUNT
63049+typedef struct {
63050+ long counter;
63051+} atomic64_unchecked_t;
63052+#else
63053+typedef atomic64_t atomic64_unchecked_t;
63054+#endif
63055 #endif
63056
63057 struct list_head {
63058diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
63059index 5ca0951..ab496a5 100644
63060--- a/include/linux/uaccess.h
63061+++ b/include/linux/uaccess.h
63062@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
63063 long ret; \
63064 mm_segment_t old_fs = get_fs(); \
63065 \
63066- set_fs(KERNEL_DS); \
63067 pagefault_disable(); \
63068- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
63069- pagefault_enable(); \
63070+ set_fs(KERNEL_DS); \
63071+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
63072 set_fs(old_fs); \
63073+ pagefault_enable(); \
63074 ret; \
63075 })
63076
63077diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
63078index 99c1b4d..bb94261 100644
63079--- a/include/linux/unaligned/access_ok.h
63080+++ b/include/linux/unaligned/access_ok.h
63081@@ -6,32 +6,32 @@
63082
63083 static inline u16 get_unaligned_le16(const void *p)
63084 {
63085- return le16_to_cpup((__le16 *)p);
63086+ return le16_to_cpup((const __le16 *)p);
63087 }
63088
63089 static inline u32 get_unaligned_le32(const void *p)
63090 {
63091- return le32_to_cpup((__le32 *)p);
63092+ return le32_to_cpup((const __le32 *)p);
63093 }
63094
63095 static inline u64 get_unaligned_le64(const void *p)
63096 {
63097- return le64_to_cpup((__le64 *)p);
63098+ return le64_to_cpup((const __le64 *)p);
63099 }
63100
63101 static inline u16 get_unaligned_be16(const void *p)
63102 {
63103- return be16_to_cpup((__be16 *)p);
63104+ return be16_to_cpup((const __be16 *)p);
63105 }
63106
63107 static inline u32 get_unaligned_be32(const void *p)
63108 {
63109- return be32_to_cpup((__be32 *)p);
63110+ return be32_to_cpup((const __be32 *)p);
63111 }
63112
63113 static inline u64 get_unaligned_be64(const void *p)
63114 {
63115- return be64_to_cpup((__be64 *)p);
63116+ return be64_to_cpup((const __be64 *)p);
63117 }
63118
63119 static inline void put_unaligned_le16(u16 val, void *p)
63120diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
63121index 547e59c..db6ad19 100644
63122--- a/include/linux/usb/renesas_usbhs.h
63123+++ b/include/linux/usb/renesas_usbhs.h
63124@@ -39,7 +39,7 @@ enum {
63125 */
63126 struct renesas_usbhs_driver_callback {
63127 int (*notify_hotplug)(struct platform_device *pdev);
63128-};
63129+} __no_const;
63130
63131 /*
63132 * callback functions for platform
63133@@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
63134 * VBUS control is needed for Host
63135 */
63136 int (*set_vbus)(struct platform_device *pdev, int enable);
63137-};
63138+} __no_const;
63139
63140 /*
63141 * parameters for renesas usbhs
63142diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
63143index 6f8fbcf..8259001 100644
63144--- a/include/linux/vermagic.h
63145+++ b/include/linux/vermagic.h
63146@@ -25,9 +25,35 @@
63147 #define MODULE_ARCH_VERMAGIC ""
63148 #endif
63149
63150+#ifdef CONFIG_PAX_REFCOUNT
63151+#define MODULE_PAX_REFCOUNT "REFCOUNT "
63152+#else
63153+#define MODULE_PAX_REFCOUNT ""
63154+#endif
63155+
63156+#ifdef CONSTIFY_PLUGIN
63157+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
63158+#else
63159+#define MODULE_CONSTIFY_PLUGIN ""
63160+#endif
63161+
63162+#ifdef STACKLEAK_PLUGIN
63163+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
63164+#else
63165+#define MODULE_STACKLEAK_PLUGIN ""
63166+#endif
63167+
63168+#ifdef CONFIG_GRKERNSEC
63169+#define MODULE_GRSEC "GRSEC "
63170+#else
63171+#define MODULE_GRSEC ""
63172+#endif
63173+
63174 #define VERMAGIC_STRING \
63175 UTS_RELEASE " " \
63176 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
63177 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
63178- MODULE_ARCH_VERMAGIC
63179+ MODULE_ARCH_VERMAGIC \
63180+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
63181+ MODULE_GRSEC
63182
63183diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
63184index dcdfc2b..ec79ab5 100644
63185--- a/include/linux/vmalloc.h
63186+++ b/include/linux/vmalloc.h
63187@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
63188 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
63189 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
63190 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
63191+
63192+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
63193+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
63194+#endif
63195+
63196 /* bits [20..32] reserved for arch specific ioremap internals */
63197
63198 /*
63199@@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
63200 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
63201 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
63202 unsigned long start, unsigned long end, gfp_t gfp_mask,
63203- pgprot_t prot, int node, void *caller);
63204+ pgprot_t prot, int node, void *caller) __size_overflow(1);
63205 extern void vfree(const void *addr);
63206
63207 extern void *vmap(struct page **pages, unsigned int count,
63208@@ -123,8 +128,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
63209 extern void free_vm_area(struct vm_struct *area);
63210
63211 /* for /dev/kmem */
63212-extern long vread(char *buf, char *addr, unsigned long count);
63213-extern long vwrite(char *buf, char *addr, unsigned long count);
63214+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
63215+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
63216
63217 /*
63218 * Internals. Dont't use..
63219diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
63220index 65efb92..137adbb 100644
63221--- a/include/linux/vmstat.h
63222+++ b/include/linux/vmstat.h
63223@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
63224 /*
63225 * Zone based page accounting with per cpu differentials.
63226 */
63227-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63228+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63229
63230 static inline void zone_page_state_add(long x, struct zone *zone,
63231 enum zone_stat_item item)
63232 {
63233- atomic_long_add(x, &zone->vm_stat[item]);
63234- atomic_long_add(x, &vm_stat[item]);
63235+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
63236+ atomic_long_add_unchecked(x, &vm_stat[item]);
63237 }
63238
63239 static inline unsigned long global_page_state(enum zone_stat_item item)
63240 {
63241- long x = atomic_long_read(&vm_stat[item]);
63242+ long x = atomic_long_read_unchecked(&vm_stat[item]);
63243 #ifdef CONFIG_SMP
63244 if (x < 0)
63245 x = 0;
63246@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
63247 static inline unsigned long zone_page_state(struct zone *zone,
63248 enum zone_stat_item item)
63249 {
63250- long x = atomic_long_read(&zone->vm_stat[item]);
63251+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63252 #ifdef CONFIG_SMP
63253 if (x < 0)
63254 x = 0;
63255@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
63256 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
63257 enum zone_stat_item item)
63258 {
63259- long x = atomic_long_read(&zone->vm_stat[item]);
63260+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63261
63262 #ifdef CONFIG_SMP
63263 int cpu;
63264@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
63265
63266 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
63267 {
63268- atomic_long_inc(&zone->vm_stat[item]);
63269- atomic_long_inc(&vm_stat[item]);
63270+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
63271+ atomic_long_inc_unchecked(&vm_stat[item]);
63272 }
63273
63274 static inline void __inc_zone_page_state(struct page *page,
63275@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
63276
63277 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
63278 {
63279- atomic_long_dec(&zone->vm_stat[item]);
63280- atomic_long_dec(&vm_stat[item]);
63281+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
63282+ atomic_long_dec_unchecked(&vm_stat[item]);
63283 }
63284
63285 static inline void __dec_zone_page_state(struct page *page,
63286diff --git a/include/linux/xattr.h b/include/linux/xattr.h
63287index e5d1220..ef6e406 100644
63288--- a/include/linux/xattr.h
63289+++ b/include/linux/xattr.h
63290@@ -57,6 +57,11 @@
63291 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
63292 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
63293
63294+/* User namespace */
63295+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
63296+#define XATTR_PAX_FLAGS_SUFFIX "flags"
63297+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
63298+
63299 #ifdef __KERNEL__
63300
63301 #include <linux/types.h>
63302diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
63303index 4aeff96..b378cdc 100644
63304--- a/include/media/saa7146_vv.h
63305+++ b/include/media/saa7146_vv.h
63306@@ -163,7 +163,7 @@ struct saa7146_ext_vv
63307 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
63308
63309 /* the extension can override this */
63310- struct v4l2_ioctl_ops ops;
63311+ v4l2_ioctl_ops_no_const ops;
63312 /* pointer to the saa7146 core ops */
63313 const struct v4l2_ioctl_ops *core_ops;
63314
63315diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
63316index 96d2221..2292f89 100644
63317--- a/include/media/v4l2-dev.h
63318+++ b/include/media/v4l2-dev.h
63319@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
63320
63321
63322 struct v4l2_file_operations {
63323- struct module *owner;
63324+ struct module * const owner;
63325 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
63326 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
63327 unsigned int (*poll) (struct file *, struct poll_table_struct *);
63328@@ -71,6 +71,7 @@ struct v4l2_file_operations {
63329 int (*open) (struct file *);
63330 int (*release) (struct file *);
63331 };
63332+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
63333
63334 /*
63335 * Newer version of video_device, handled by videodev2.c
63336diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
63337index 3cb939c..f23c6bb 100644
63338--- a/include/media/v4l2-ioctl.h
63339+++ b/include/media/v4l2-ioctl.h
63340@@ -281,7 +281,7 @@ struct v4l2_ioctl_ops {
63341 long (*vidioc_default) (struct file *file, void *fh,
63342 bool valid_prio, int cmd, void *arg);
63343 };
63344-
63345+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
63346
63347 /* v4l debugging and diagnostics */
63348
63349diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
63350index 6db8ecf..8c23861 100644
63351--- a/include/net/caif/caif_hsi.h
63352+++ b/include/net/caif/caif_hsi.h
63353@@ -98,7 +98,7 @@ struct cfhsi_drv {
63354 void (*rx_done_cb) (struct cfhsi_drv *drv);
63355 void (*wake_up_cb) (struct cfhsi_drv *drv);
63356 void (*wake_down_cb) (struct cfhsi_drv *drv);
63357-};
63358+} __no_const;
63359
63360 /* Structure implemented by HSI device. */
63361 struct cfhsi_dev {
63362diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
63363index 9e5425b..8136ffc 100644
63364--- a/include/net/caif/cfctrl.h
63365+++ b/include/net/caif/cfctrl.h
63366@@ -52,7 +52,7 @@ struct cfctrl_rsp {
63367 void (*radioset_rsp)(void);
63368 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
63369 struct cflayer *client_layer);
63370-};
63371+} __no_const;
63372
63373 /* Link Setup Parameters for CAIF-Links. */
63374 struct cfctrl_link_param {
63375@@ -101,8 +101,8 @@ struct cfctrl_request_info {
63376 struct cfctrl {
63377 struct cfsrvl serv;
63378 struct cfctrl_rsp res;
63379- atomic_t req_seq_no;
63380- atomic_t rsp_seq_no;
63381+ atomic_unchecked_t req_seq_no;
63382+ atomic_unchecked_t rsp_seq_no;
63383 struct list_head list;
63384 /* Protects from simultaneous access to first_req list */
63385 spinlock_t info_list_lock;
63386diff --git a/include/net/flow.h b/include/net/flow.h
63387index 6c469db..7743b8e 100644
63388--- a/include/net/flow.h
63389+++ b/include/net/flow.h
63390@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
63391
63392 extern void flow_cache_flush(void);
63393 extern void flow_cache_flush_deferred(void);
63394-extern atomic_t flow_cache_genid;
63395+extern atomic_unchecked_t flow_cache_genid;
63396
63397 #endif
63398diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
63399index b94765e..053f68b 100644
63400--- a/include/net/inetpeer.h
63401+++ b/include/net/inetpeer.h
63402@@ -48,8 +48,8 @@ struct inet_peer {
63403 */
63404 union {
63405 struct {
63406- atomic_t rid; /* Frag reception counter */
63407- atomic_t ip_id_count; /* IP ID for the next packet */
63408+ atomic_unchecked_t rid; /* Frag reception counter */
63409+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
63410 __u32 tcp_ts;
63411 __u32 tcp_ts_stamp;
63412 };
63413@@ -115,11 +115,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
63414 more++;
63415 inet_peer_refcheck(p);
63416 do {
63417- old = atomic_read(&p->ip_id_count);
63418+ old = atomic_read_unchecked(&p->ip_id_count);
63419 new = old + more;
63420 if (!new)
63421 new = 1;
63422- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
63423+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
63424 return new;
63425 }
63426
63427diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
63428index 10422ef..662570f 100644
63429--- a/include/net/ip_fib.h
63430+++ b/include/net/ip_fib.h
63431@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
63432
63433 #define FIB_RES_SADDR(net, res) \
63434 ((FIB_RES_NH(res).nh_saddr_genid == \
63435- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
63436+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
63437 FIB_RES_NH(res).nh_saddr : \
63438 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
63439 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
63440diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
63441index 72522f0..6f03a2b 100644
63442--- a/include/net/ip_vs.h
63443+++ b/include/net/ip_vs.h
63444@@ -510,7 +510,7 @@ struct ip_vs_conn {
63445 struct ip_vs_conn *control; /* Master control connection */
63446 atomic_t n_control; /* Number of controlled ones */
63447 struct ip_vs_dest *dest; /* real server */
63448- atomic_t in_pkts; /* incoming packet counter */
63449+ atomic_unchecked_t in_pkts; /* incoming packet counter */
63450
63451 /* packet transmitter for different forwarding methods. If it
63452 mangles the packet, it must return NF_DROP or better NF_STOLEN,
63453@@ -648,7 +648,7 @@ struct ip_vs_dest {
63454 __be16 port; /* port number of the server */
63455 union nf_inet_addr addr; /* IP address of the server */
63456 volatile unsigned flags; /* dest status flags */
63457- atomic_t conn_flags; /* flags to copy to conn */
63458+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
63459 atomic_t weight; /* server weight */
63460
63461 atomic_t refcnt; /* reference counter */
63462diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
63463index 69b610a..fe3962c 100644
63464--- a/include/net/irda/ircomm_core.h
63465+++ b/include/net/irda/ircomm_core.h
63466@@ -51,7 +51,7 @@ typedef struct {
63467 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
63468 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
63469 struct ircomm_info *);
63470-} call_t;
63471+} __no_const call_t;
63472
63473 struct ircomm_cb {
63474 irda_queue_t queue;
63475diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
63476index 59ba38bc..d515662 100644
63477--- a/include/net/irda/ircomm_tty.h
63478+++ b/include/net/irda/ircomm_tty.h
63479@@ -35,6 +35,7 @@
63480 #include <linux/termios.h>
63481 #include <linux/timer.h>
63482 #include <linux/tty.h> /* struct tty_struct */
63483+#include <asm/local.h>
63484
63485 #include <net/irda/irias_object.h>
63486 #include <net/irda/ircomm_core.h>
63487@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
63488 unsigned short close_delay;
63489 unsigned short closing_wait; /* time to wait before closing */
63490
63491- int open_count;
63492- int blocked_open; /* # of blocked opens */
63493+ local_t open_count;
63494+ local_t blocked_open; /* # of blocked opens */
63495
63496 /* Protect concurent access to :
63497 * o self->open_count
63498diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
63499index cc7c197..9f2da2a 100644
63500--- a/include/net/iucv/af_iucv.h
63501+++ b/include/net/iucv/af_iucv.h
63502@@ -141,7 +141,7 @@ struct iucv_sock {
63503 struct iucv_sock_list {
63504 struct hlist_head head;
63505 rwlock_t lock;
63506- atomic_t autobind_name;
63507+ atomic_unchecked_t autobind_name;
63508 };
63509
63510 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
63511diff --git a/include/net/neighbour.h b/include/net/neighbour.h
63512index 34c996f..bb3b4d4 100644
63513--- a/include/net/neighbour.h
63514+++ b/include/net/neighbour.h
63515@@ -123,7 +123,7 @@ struct neigh_ops {
63516 void (*error_report)(struct neighbour *, struct sk_buff *);
63517 int (*output)(struct neighbour *, struct sk_buff *);
63518 int (*connected_output)(struct neighbour *, struct sk_buff *);
63519-};
63520+} __do_const;
63521
63522 struct pneigh_entry {
63523 struct pneigh_entry *next;
63524diff --git a/include/net/netlink.h b/include/net/netlink.h
63525index f394fe5..fd073f9 100644
63526--- a/include/net/netlink.h
63527+++ b/include/net/netlink.h
63528@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
63529 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
63530 {
63531 if (mark)
63532- skb_trim(skb, (unsigned char *) mark - skb->data);
63533+ skb_trim(skb, (const unsigned char *) mark - skb->data);
63534 }
63535
63536 /**
63537diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
63538index bbd023a..97c6d0d 100644
63539--- a/include/net/netns/ipv4.h
63540+++ b/include/net/netns/ipv4.h
63541@@ -57,8 +57,8 @@ struct netns_ipv4 {
63542 unsigned int sysctl_ping_group_range[2];
63543 long sysctl_tcp_mem[3];
63544
63545- atomic_t rt_genid;
63546- atomic_t dev_addr_genid;
63547+ atomic_unchecked_t rt_genid;
63548+ atomic_unchecked_t dev_addr_genid;
63549
63550 #ifdef CONFIG_IP_MROUTE
63551 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
63552diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
63553index a2ef814..31a8e3f 100644
63554--- a/include/net/sctp/sctp.h
63555+++ b/include/net/sctp/sctp.h
63556@@ -318,9 +318,9 @@ do { \
63557
63558 #else /* SCTP_DEBUG */
63559
63560-#define SCTP_DEBUG_PRINTK(whatever...)
63561-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
63562-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
63563+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
63564+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
63565+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
63566 #define SCTP_ENABLE_DEBUG
63567 #define SCTP_DISABLE_DEBUG
63568 #define SCTP_ASSERT(expr, str, func)
63569diff --git a/include/net/sock.h b/include/net/sock.h
63570index 5a0a58a..2e3d4d0 100644
63571--- a/include/net/sock.h
63572+++ b/include/net/sock.h
63573@@ -302,7 +302,7 @@ struct sock {
63574 #ifdef CONFIG_RPS
63575 __u32 sk_rxhash;
63576 #endif
63577- atomic_t sk_drops;
63578+ atomic_unchecked_t sk_drops;
63579 int sk_rcvbuf;
63580
63581 struct sk_filter __rcu *sk_filter;
63582@@ -1691,7 +1691,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
63583 }
63584
63585 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
63586- char __user *from, char *to,
63587+ char __user *from, unsigned char *to,
63588 int copy, int offset)
63589 {
63590 if (skb->ip_summed == CHECKSUM_NONE) {
63591diff --git a/include/net/tcp.h b/include/net/tcp.h
63592index f75a04d..702cf06 100644
63593--- a/include/net/tcp.h
63594+++ b/include/net/tcp.h
63595@@ -1425,7 +1425,7 @@ struct tcp_seq_afinfo {
63596 char *name;
63597 sa_family_t family;
63598 const struct file_operations *seq_fops;
63599- struct seq_operations seq_ops;
63600+ seq_operations_no_const seq_ops;
63601 };
63602
63603 struct tcp_iter_state {
63604diff --git a/include/net/udp.h b/include/net/udp.h
63605index 5d606d9..e879f7b 100644
63606--- a/include/net/udp.h
63607+++ b/include/net/udp.h
63608@@ -244,7 +244,7 @@ struct udp_seq_afinfo {
63609 sa_family_t family;
63610 struct udp_table *udp_table;
63611 const struct file_operations *seq_fops;
63612- struct seq_operations seq_ops;
63613+ seq_operations_no_const seq_ops;
63614 };
63615
63616 struct udp_iter_state {
63617diff --git a/include/net/xfrm.h b/include/net/xfrm.h
63618index 96239e7..c85b032 100644
63619--- a/include/net/xfrm.h
63620+++ b/include/net/xfrm.h
63621@@ -505,7 +505,7 @@ struct xfrm_policy {
63622 struct timer_list timer;
63623
63624 struct flow_cache_object flo;
63625- atomic_t genid;
63626+ atomic_unchecked_t genid;
63627 u32 priority;
63628 u32 index;
63629 struct xfrm_mark mark;
63630diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
63631index 1a046b1..ee0bef0 100644
63632--- a/include/rdma/iw_cm.h
63633+++ b/include/rdma/iw_cm.h
63634@@ -122,7 +122,7 @@ struct iw_cm_verbs {
63635 int backlog);
63636
63637 int (*destroy_listen)(struct iw_cm_id *cm_id);
63638-};
63639+} __no_const;
63640
63641 /**
63642 * iw_create_cm_id - Create an IW CM identifier.
63643diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
63644index 8f9dfba..610ab6c 100644
63645--- a/include/scsi/libfc.h
63646+++ b/include/scsi/libfc.h
63647@@ -756,6 +756,7 @@ struct libfc_function_template {
63648 */
63649 void (*disc_stop_final) (struct fc_lport *);
63650 };
63651+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
63652
63653 /**
63654 * struct fc_disc - Discovery context
63655@@ -861,7 +862,7 @@ struct fc_lport {
63656 struct fc_vport *vport;
63657
63658 /* Operational Information */
63659- struct libfc_function_template tt;
63660+ libfc_function_template_no_const tt;
63661 u8 link_up;
63662 u8 qfull;
63663 enum fc_lport_state state;
63664diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
63665index 6efb2e1..cdad57f 100644
63666--- a/include/scsi/scsi_device.h
63667+++ b/include/scsi/scsi_device.h
63668@@ -162,9 +162,9 @@ struct scsi_device {
63669 unsigned int max_device_blocked; /* what device_blocked counts down from */
63670 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
63671
63672- atomic_t iorequest_cnt;
63673- atomic_t iodone_cnt;
63674- atomic_t ioerr_cnt;
63675+ atomic_unchecked_t iorequest_cnt;
63676+ atomic_unchecked_t iodone_cnt;
63677+ atomic_unchecked_t ioerr_cnt;
63678
63679 struct device sdev_gendev,
63680 sdev_dev;
63681diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
63682index 719faf1..d1154d4 100644
63683--- a/include/scsi/scsi_transport_fc.h
63684+++ b/include/scsi/scsi_transport_fc.h
63685@@ -739,7 +739,7 @@ struct fc_function_template {
63686 unsigned long show_host_system_hostname:1;
63687
63688 unsigned long disable_target_scan:1;
63689-};
63690+} __do_const;
63691
63692
63693 /**
63694diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
63695index 030b87c..98a6954 100644
63696--- a/include/sound/ak4xxx-adda.h
63697+++ b/include/sound/ak4xxx-adda.h
63698@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
63699 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
63700 unsigned char val);
63701 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
63702-};
63703+} __no_const;
63704
63705 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
63706
63707diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
63708index 8c05e47..2b5df97 100644
63709--- a/include/sound/hwdep.h
63710+++ b/include/sound/hwdep.h
63711@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
63712 struct snd_hwdep_dsp_status *status);
63713 int (*dsp_load)(struct snd_hwdep *hw,
63714 struct snd_hwdep_dsp_image *image);
63715-};
63716+} __no_const;
63717
63718 struct snd_hwdep {
63719 struct snd_card *card;
63720diff --git a/include/sound/info.h b/include/sound/info.h
63721index 9ca1a49..aba1728 100644
63722--- a/include/sound/info.h
63723+++ b/include/sound/info.h
63724@@ -44,7 +44,7 @@ struct snd_info_entry_text {
63725 struct snd_info_buffer *buffer);
63726 void (*write)(struct snd_info_entry *entry,
63727 struct snd_info_buffer *buffer);
63728-};
63729+} __no_const;
63730
63731 struct snd_info_entry_ops {
63732 int (*open)(struct snd_info_entry *entry,
63733diff --git a/include/sound/pcm.h b/include/sound/pcm.h
63734index 0d11128..814178e 100644
63735--- a/include/sound/pcm.h
63736+++ b/include/sound/pcm.h
63737@@ -81,6 +81,7 @@ struct snd_pcm_ops {
63738 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
63739 int (*ack)(struct snd_pcm_substream *substream);
63740 };
63741+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
63742
63743 /*
63744 *
63745diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
63746index af1b49e..a5d55a5 100644
63747--- a/include/sound/sb16_csp.h
63748+++ b/include/sound/sb16_csp.h
63749@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
63750 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
63751 int (*csp_stop) (struct snd_sb_csp * p);
63752 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
63753-};
63754+} __no_const;
63755
63756 /*
63757 * CSP private data
63758diff --git a/include/sound/soc.h b/include/sound/soc.h
63759index 2ebf787..0276839 100644
63760--- a/include/sound/soc.h
63761+++ b/include/sound/soc.h
63762@@ -711,7 +711,7 @@ struct snd_soc_platform_driver {
63763 /* platform IO - used for platform DAPM */
63764 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
63765 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
63766-};
63767+} __do_const;
63768
63769 struct snd_soc_platform {
63770 const char *name;
63771@@ -887,7 +887,7 @@ struct snd_soc_pcm_runtime {
63772 struct snd_soc_dai_link *dai_link;
63773 struct mutex pcm_mutex;
63774 enum snd_soc_pcm_subclass pcm_subclass;
63775- struct snd_pcm_ops ops;
63776+ snd_pcm_ops_no_const ops;
63777
63778 unsigned int complete:1;
63779 unsigned int dev_registered:1;
63780diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
63781index 4119966..1a4671c 100644
63782--- a/include/sound/ymfpci.h
63783+++ b/include/sound/ymfpci.h
63784@@ -358,7 +358,7 @@ struct snd_ymfpci {
63785 spinlock_t reg_lock;
63786 spinlock_t voice_lock;
63787 wait_queue_head_t interrupt_sleep;
63788- atomic_t interrupt_sleep_count;
63789+ atomic_unchecked_t interrupt_sleep_count;
63790 struct snd_info_entry *proc_entry;
63791 const struct firmware *dsp_microcode;
63792 const struct firmware *controller_microcode;
63793diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
63794index aaccc5f..092d568 100644
63795--- a/include/target/target_core_base.h
63796+++ b/include/target/target_core_base.h
63797@@ -447,7 +447,7 @@ struct t10_reservation_ops {
63798 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
63799 int (*t10_pr_register)(struct se_cmd *);
63800 int (*t10_pr_clear)(struct se_cmd *);
63801-};
63802+} __no_const;
63803
63804 struct t10_reservation {
63805 /* Reservation effects all target ports */
63806@@ -576,7 +576,7 @@ struct se_cmd {
63807 atomic_t t_se_count;
63808 atomic_t t_task_cdbs_left;
63809 atomic_t t_task_cdbs_ex_left;
63810- atomic_t t_task_cdbs_sent;
63811+ atomic_unchecked_t t_task_cdbs_sent;
63812 unsigned int transport_state;
63813 #define CMD_T_ABORTED (1 << 0)
63814 #define CMD_T_ACTIVE (1 << 1)
63815@@ -802,7 +802,7 @@ struct se_device {
63816 spinlock_t stats_lock;
63817 /* Active commands on this virtual SE device */
63818 atomic_t simple_cmds;
63819- atomic_t dev_ordered_id;
63820+ atomic_unchecked_t dev_ordered_id;
63821 atomic_t execute_tasks;
63822 atomic_t dev_ordered_sync;
63823 atomic_t dev_qf_count;
63824diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
63825new file mode 100644
63826index 0000000..2efe49d
63827--- /dev/null
63828+++ b/include/trace/events/fs.h
63829@@ -0,0 +1,53 @@
63830+#undef TRACE_SYSTEM
63831+#define TRACE_SYSTEM fs
63832+
63833+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
63834+#define _TRACE_FS_H
63835+
63836+#include <linux/fs.h>
63837+#include <linux/tracepoint.h>
63838+
63839+TRACE_EVENT(do_sys_open,
63840+
63841+ TP_PROTO(char *filename, int flags, int mode),
63842+
63843+ TP_ARGS(filename, flags, mode),
63844+
63845+ TP_STRUCT__entry(
63846+ __string( filename, filename )
63847+ __field( int, flags )
63848+ __field( int, mode )
63849+ ),
63850+
63851+ TP_fast_assign(
63852+ __assign_str(filename, filename);
63853+ __entry->flags = flags;
63854+ __entry->mode = mode;
63855+ ),
63856+
63857+ TP_printk("\"%s\" %x %o",
63858+ __get_str(filename), __entry->flags, __entry->mode)
63859+);
63860+
63861+TRACE_EVENT(open_exec,
63862+
63863+ TP_PROTO(const char *filename),
63864+
63865+ TP_ARGS(filename),
63866+
63867+ TP_STRUCT__entry(
63868+ __string( filename, filename )
63869+ ),
63870+
63871+ TP_fast_assign(
63872+ __assign_str(filename, filename);
63873+ ),
63874+
63875+ TP_printk("\"%s\"",
63876+ __get_str(filename))
63877+);
63878+
63879+#endif /* _TRACE_FS_H */
63880+
63881+/* This part must be outside protection */
63882+#include <trace/define_trace.h>
63883diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
63884index 1c09820..7f5ec79 100644
63885--- a/include/trace/events/irq.h
63886+++ b/include/trace/events/irq.h
63887@@ -36,7 +36,7 @@ struct softirq_action;
63888 */
63889 TRACE_EVENT(irq_handler_entry,
63890
63891- TP_PROTO(int irq, struct irqaction *action),
63892+ TP_PROTO(int irq, const struct irqaction *action),
63893
63894 TP_ARGS(irq, action),
63895
63896@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
63897 */
63898 TRACE_EVENT(irq_handler_exit,
63899
63900- TP_PROTO(int irq, struct irqaction *action, int ret),
63901+ TP_PROTO(int irq, const struct irqaction *action, int ret),
63902
63903 TP_ARGS(irq, action, ret),
63904
63905diff --git a/include/video/udlfb.h b/include/video/udlfb.h
63906index f9466fa..f4e2b81 100644
63907--- a/include/video/udlfb.h
63908+++ b/include/video/udlfb.h
63909@@ -53,10 +53,10 @@ struct dlfb_data {
63910 u32 pseudo_palette[256];
63911 int blank_mode; /*one of FB_BLANK_ */
63912 /* blit-only rendering path metrics, exposed through sysfs */
63913- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
63914- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
63915- atomic_t bytes_sent; /* to usb, after compression including overhead */
63916- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
63917+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
63918+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
63919+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
63920+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
63921 };
63922
63923 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
63924diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
63925index 0993a22..32ba2fe 100644
63926--- a/include/video/uvesafb.h
63927+++ b/include/video/uvesafb.h
63928@@ -177,6 +177,7 @@ struct uvesafb_par {
63929 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
63930 u8 pmi_setpal; /* PMI for palette changes */
63931 u16 *pmi_base; /* protected mode interface location */
63932+ u8 *pmi_code; /* protected mode code location */
63933 void *pmi_start;
63934 void *pmi_pal;
63935 u8 *vbe_state_orig; /*
63936diff --git a/init/Kconfig b/init/Kconfig
63937index 6cfd71d..73cb68d 100644
63938--- a/init/Kconfig
63939+++ b/init/Kconfig
63940@@ -790,6 +790,7 @@ endif # CGROUPS
63941
63942 config CHECKPOINT_RESTORE
63943 bool "Checkpoint/restore support" if EXPERT
63944+ depends on !GRKERNSEC
63945 default n
63946 help
63947 Enables additional kernel features in a sake of checkpoint/restore.
63948@@ -1240,7 +1241,7 @@ config SLUB_DEBUG
63949
63950 config COMPAT_BRK
63951 bool "Disable heap randomization"
63952- default y
63953+ default n
63954 help
63955 Randomizing heap placement makes heap exploits harder, but it
63956 also breaks ancient binaries (including anything libc5 based).
63957diff --git a/init/do_mounts.c b/init/do_mounts.c
63958index 42b0707..c06eef4 100644
63959--- a/init/do_mounts.c
63960+++ b/init/do_mounts.c
63961@@ -326,11 +326,11 @@ static void __init get_fs_names(char *page)
63962 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
63963 {
63964 struct super_block *s;
63965- int err = sys_mount(name, "/root", fs, flags, data);
63966+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
63967 if (err)
63968 return err;
63969
63970- sys_chdir((const char __user __force *)"/root");
63971+ sys_chdir((const char __force_user *)"/root");
63972 s = current->fs->pwd.dentry->d_sb;
63973 ROOT_DEV = s->s_dev;
63974 printk(KERN_INFO
63975@@ -450,18 +450,18 @@ void __init change_floppy(char *fmt, ...)
63976 va_start(args, fmt);
63977 vsprintf(buf, fmt, args);
63978 va_end(args);
63979- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
63980+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
63981 if (fd >= 0) {
63982 sys_ioctl(fd, FDEJECT, 0);
63983 sys_close(fd);
63984 }
63985 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
63986- fd = sys_open("/dev/console", O_RDWR, 0);
63987+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
63988 if (fd >= 0) {
63989 sys_ioctl(fd, TCGETS, (long)&termios);
63990 termios.c_lflag &= ~ICANON;
63991 sys_ioctl(fd, TCSETSF, (long)&termios);
63992- sys_read(fd, &c, 1);
63993+ sys_read(fd, (char __user *)&c, 1);
63994 termios.c_lflag |= ICANON;
63995 sys_ioctl(fd, TCSETSF, (long)&termios);
63996 sys_close(fd);
63997@@ -555,6 +555,6 @@ void __init prepare_namespace(void)
63998 mount_root();
63999 out:
64000 devtmpfs_mount("dev");
64001- sys_mount(".", "/", NULL, MS_MOVE, NULL);
64002- sys_chroot((const char __user __force *)".");
64003+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64004+ sys_chroot((const char __force_user *)".");
64005 }
64006diff --git a/init/do_mounts.h b/init/do_mounts.h
64007index f5b978a..69dbfe8 100644
64008--- a/init/do_mounts.h
64009+++ b/init/do_mounts.h
64010@@ -15,15 +15,15 @@ extern int root_mountflags;
64011
64012 static inline int create_dev(char *name, dev_t dev)
64013 {
64014- sys_unlink(name);
64015- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
64016+ sys_unlink((char __force_user *)name);
64017+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
64018 }
64019
64020 #if BITS_PER_LONG == 32
64021 static inline u32 bstat(char *name)
64022 {
64023 struct stat64 stat;
64024- if (sys_stat64(name, &stat) != 0)
64025+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
64026 return 0;
64027 if (!S_ISBLK(stat.st_mode))
64028 return 0;
64029@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
64030 static inline u32 bstat(char *name)
64031 {
64032 struct stat stat;
64033- if (sys_newstat(name, &stat) != 0)
64034+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
64035 return 0;
64036 if (!S_ISBLK(stat.st_mode))
64037 return 0;
64038diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
64039index 9047330..de0d1fb 100644
64040--- a/init/do_mounts_initrd.c
64041+++ b/init/do_mounts_initrd.c
64042@@ -43,13 +43,13 @@ static void __init handle_initrd(void)
64043 create_dev("/dev/root.old", Root_RAM0);
64044 /* mount initrd on rootfs' /root */
64045 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
64046- sys_mkdir("/old", 0700);
64047- root_fd = sys_open("/", 0, 0);
64048- old_fd = sys_open("/old", 0, 0);
64049+ sys_mkdir((const char __force_user *)"/old", 0700);
64050+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
64051+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
64052 /* move initrd over / and chdir/chroot in initrd root */
64053- sys_chdir("/root");
64054- sys_mount(".", "/", NULL, MS_MOVE, NULL);
64055- sys_chroot(".");
64056+ sys_chdir((const char __force_user *)"/root");
64057+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64058+ sys_chroot((const char __force_user *)".");
64059
64060 /*
64061 * In case that a resume from disk is carried out by linuxrc or one of
64062@@ -66,15 +66,15 @@ static void __init handle_initrd(void)
64063
64064 /* move initrd to rootfs' /old */
64065 sys_fchdir(old_fd);
64066- sys_mount("/", ".", NULL, MS_MOVE, NULL);
64067+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
64068 /* switch root and cwd back to / of rootfs */
64069 sys_fchdir(root_fd);
64070- sys_chroot(".");
64071+ sys_chroot((const char __force_user *)".");
64072 sys_close(old_fd);
64073 sys_close(root_fd);
64074
64075 if (new_decode_dev(real_root_dev) == Root_RAM0) {
64076- sys_chdir("/old");
64077+ sys_chdir((const char __force_user *)"/old");
64078 return;
64079 }
64080
64081@@ -82,17 +82,17 @@ static void __init handle_initrd(void)
64082 mount_root();
64083
64084 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
64085- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
64086+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
64087 if (!error)
64088 printk("okay\n");
64089 else {
64090- int fd = sys_open("/dev/root.old", O_RDWR, 0);
64091+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
64092 if (error == -ENOENT)
64093 printk("/initrd does not exist. Ignored.\n");
64094 else
64095 printk("failed\n");
64096 printk(KERN_NOTICE "Unmounting old root\n");
64097- sys_umount("/old", MNT_DETACH);
64098+ sys_umount((char __force_user *)"/old", MNT_DETACH);
64099 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
64100 if (fd < 0) {
64101 error = fd;
64102@@ -115,11 +115,11 @@ int __init initrd_load(void)
64103 * mounted in the normal path.
64104 */
64105 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
64106- sys_unlink("/initrd.image");
64107+ sys_unlink((const char __force_user *)"/initrd.image");
64108 handle_initrd();
64109 return 1;
64110 }
64111 }
64112- sys_unlink("/initrd.image");
64113+ sys_unlink((const char __force_user *)"/initrd.image");
64114 return 0;
64115 }
64116diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
64117index 32c4799..c27ee74 100644
64118--- a/init/do_mounts_md.c
64119+++ b/init/do_mounts_md.c
64120@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
64121 partitioned ? "_d" : "", minor,
64122 md_setup_args[ent].device_names);
64123
64124- fd = sys_open(name, 0, 0);
64125+ fd = sys_open((char __force_user *)name, 0, 0);
64126 if (fd < 0) {
64127 printk(KERN_ERR "md: open failed - cannot start "
64128 "array %s\n", name);
64129@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
64130 * array without it
64131 */
64132 sys_close(fd);
64133- fd = sys_open(name, 0, 0);
64134+ fd = sys_open((char __force_user *)name, 0, 0);
64135 sys_ioctl(fd, BLKRRPART, 0);
64136 }
64137 sys_close(fd);
64138@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
64139
64140 wait_for_device_probe();
64141
64142- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
64143+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
64144 if (fd >= 0) {
64145 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
64146 sys_close(fd);
64147diff --git a/init/initramfs.c b/init/initramfs.c
64148index 8216c30..25e8e32 100644
64149--- a/init/initramfs.c
64150+++ b/init/initramfs.c
64151@@ -74,7 +74,7 @@ static void __init free_hash(void)
64152 }
64153 }
64154
64155-static long __init do_utime(char __user *filename, time_t mtime)
64156+static long __init do_utime(__force char __user *filename, time_t mtime)
64157 {
64158 struct timespec t[2];
64159
64160@@ -109,7 +109,7 @@ static void __init dir_utime(void)
64161 struct dir_entry *de, *tmp;
64162 list_for_each_entry_safe(de, tmp, &dir_list, list) {
64163 list_del(&de->list);
64164- do_utime(de->name, de->mtime);
64165+ do_utime((char __force_user *)de->name, de->mtime);
64166 kfree(de->name);
64167 kfree(de);
64168 }
64169@@ -271,7 +271,7 @@ static int __init maybe_link(void)
64170 if (nlink >= 2) {
64171 char *old = find_link(major, minor, ino, mode, collected);
64172 if (old)
64173- return (sys_link(old, collected) < 0) ? -1 : 1;
64174+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
64175 }
64176 return 0;
64177 }
64178@@ -280,11 +280,11 @@ static void __init clean_path(char *path, umode_t mode)
64179 {
64180 struct stat st;
64181
64182- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
64183+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
64184 if (S_ISDIR(st.st_mode))
64185- sys_rmdir(path);
64186+ sys_rmdir((char __force_user *)path);
64187 else
64188- sys_unlink(path);
64189+ sys_unlink((char __force_user *)path);
64190 }
64191 }
64192
64193@@ -305,7 +305,7 @@ static int __init do_name(void)
64194 int openflags = O_WRONLY|O_CREAT;
64195 if (ml != 1)
64196 openflags |= O_TRUNC;
64197- wfd = sys_open(collected, openflags, mode);
64198+ wfd = sys_open((char __force_user *)collected, openflags, mode);
64199
64200 if (wfd >= 0) {
64201 sys_fchown(wfd, uid, gid);
64202@@ -317,17 +317,17 @@ static int __init do_name(void)
64203 }
64204 }
64205 } else if (S_ISDIR(mode)) {
64206- sys_mkdir(collected, mode);
64207- sys_chown(collected, uid, gid);
64208- sys_chmod(collected, mode);
64209+ sys_mkdir((char __force_user *)collected, mode);
64210+ sys_chown((char __force_user *)collected, uid, gid);
64211+ sys_chmod((char __force_user *)collected, mode);
64212 dir_add(collected, mtime);
64213 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
64214 S_ISFIFO(mode) || S_ISSOCK(mode)) {
64215 if (maybe_link() == 0) {
64216- sys_mknod(collected, mode, rdev);
64217- sys_chown(collected, uid, gid);
64218- sys_chmod(collected, mode);
64219- do_utime(collected, mtime);
64220+ sys_mknod((char __force_user *)collected, mode, rdev);
64221+ sys_chown((char __force_user *)collected, uid, gid);
64222+ sys_chmod((char __force_user *)collected, mode);
64223+ do_utime((char __force_user *)collected, mtime);
64224 }
64225 }
64226 return 0;
64227@@ -336,15 +336,15 @@ static int __init do_name(void)
64228 static int __init do_copy(void)
64229 {
64230 if (count >= body_len) {
64231- sys_write(wfd, victim, body_len);
64232+ sys_write(wfd, (char __force_user *)victim, body_len);
64233 sys_close(wfd);
64234- do_utime(vcollected, mtime);
64235+ do_utime((char __force_user *)vcollected, mtime);
64236 kfree(vcollected);
64237 eat(body_len);
64238 state = SkipIt;
64239 return 0;
64240 } else {
64241- sys_write(wfd, victim, count);
64242+ sys_write(wfd, (char __force_user *)victim, count);
64243 body_len -= count;
64244 eat(count);
64245 return 1;
64246@@ -355,9 +355,9 @@ static int __init do_symlink(void)
64247 {
64248 collected[N_ALIGN(name_len) + body_len] = '\0';
64249 clean_path(collected, 0);
64250- sys_symlink(collected + N_ALIGN(name_len), collected);
64251- sys_lchown(collected, uid, gid);
64252- do_utime(collected, mtime);
64253+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
64254+ sys_lchown((char __force_user *)collected, uid, gid);
64255+ do_utime((char __force_user *)collected, mtime);
64256 state = SkipIt;
64257 next_state = Reset;
64258 return 0;
64259diff --git a/init/main.c b/init/main.c
64260index b08c5f7..09f865e 100644
64261--- a/init/main.c
64262+++ b/init/main.c
64263@@ -95,6 +95,8 @@ static inline void mark_rodata_ro(void) { }
64264 extern void tc_init(void);
64265 #endif
64266
64267+extern void grsecurity_init(void);
64268+
64269 /*
64270 * Debug helper: via this flag we know that we are in 'early bootup code'
64271 * where only the boot processor is running with IRQ disabled. This means
64272@@ -148,6 +150,49 @@ static int __init set_reset_devices(char *str)
64273
64274 __setup("reset_devices", set_reset_devices);
64275
64276+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
64277+extern char pax_enter_kernel_user[];
64278+extern char pax_exit_kernel_user[];
64279+extern pgdval_t clone_pgd_mask;
64280+#endif
64281+
64282+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
64283+static int __init setup_pax_nouderef(char *str)
64284+{
64285+#ifdef CONFIG_X86_32
64286+ unsigned int cpu;
64287+ struct desc_struct *gdt;
64288+
64289+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
64290+ gdt = get_cpu_gdt_table(cpu);
64291+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
64292+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
64293+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
64294+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
64295+ }
64296+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
64297+#else
64298+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
64299+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
64300+ clone_pgd_mask = ~(pgdval_t)0UL;
64301+#endif
64302+
64303+ return 0;
64304+}
64305+early_param("pax_nouderef", setup_pax_nouderef);
64306+#endif
64307+
64308+#ifdef CONFIG_PAX_SOFTMODE
64309+int pax_softmode;
64310+
64311+static int __init setup_pax_softmode(char *str)
64312+{
64313+ get_option(&str, &pax_softmode);
64314+ return 1;
64315+}
64316+__setup("pax_softmode=", setup_pax_softmode);
64317+#endif
64318+
64319 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
64320 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
64321 static const char *panic_later, *panic_param;
64322@@ -674,6 +719,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
64323 {
64324 int count = preempt_count();
64325 int ret;
64326+ const char *msg1 = "", *msg2 = "";
64327
64328 if (initcall_debug)
64329 ret = do_one_initcall_debug(fn);
64330@@ -686,15 +732,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
64331 sprintf(msgbuf, "error code %d ", ret);
64332
64333 if (preempt_count() != count) {
64334- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
64335+ msg1 = " preemption imbalance";
64336 preempt_count() = count;
64337 }
64338 if (irqs_disabled()) {
64339- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
64340+ msg2 = " disabled interrupts";
64341 local_irq_enable();
64342 }
64343- if (msgbuf[0]) {
64344- printk("initcall %pF returned with %s\n", fn, msgbuf);
64345+ if (msgbuf[0] || *msg1 || *msg2) {
64346+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
64347 }
64348
64349 return ret;
64350@@ -865,7 +911,7 @@ static int __init kernel_init(void * unused)
64351 do_basic_setup();
64352
64353 /* Open the /dev/console on the rootfs, this should never fail */
64354- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
64355+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
64356 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
64357
64358 (void) sys_dup(0);
64359@@ -878,11 +924,13 @@ static int __init kernel_init(void * unused)
64360 if (!ramdisk_execute_command)
64361 ramdisk_execute_command = "/init";
64362
64363- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
64364+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
64365 ramdisk_execute_command = NULL;
64366 prepare_namespace();
64367 }
64368
64369+ grsecurity_init();
64370+
64371 /*
64372 * Ok, we have completed the initial bootup, and
64373 * we're essentially up and running. Get rid of the
64374diff --git a/ipc/mqueue.c b/ipc/mqueue.c
64375index 28bd64d..c66b72a 100644
64376--- a/ipc/mqueue.c
64377+++ b/ipc/mqueue.c
64378@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
64379 mq_bytes = (mq_msg_tblsz +
64380 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
64381
64382+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
64383 spin_lock(&mq_lock);
64384 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
64385 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
64386diff --git a/ipc/msg.c b/ipc/msg.c
64387index 7385de2..a8180e08 100644
64388--- a/ipc/msg.c
64389+++ b/ipc/msg.c
64390@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
64391 return security_msg_queue_associate(msq, msgflg);
64392 }
64393
64394+static struct ipc_ops msg_ops = {
64395+ .getnew = newque,
64396+ .associate = msg_security,
64397+ .more_checks = NULL
64398+};
64399+
64400 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
64401 {
64402 struct ipc_namespace *ns;
64403- struct ipc_ops msg_ops;
64404 struct ipc_params msg_params;
64405
64406 ns = current->nsproxy->ipc_ns;
64407
64408- msg_ops.getnew = newque;
64409- msg_ops.associate = msg_security;
64410- msg_ops.more_checks = NULL;
64411-
64412 msg_params.key = key;
64413 msg_params.flg = msgflg;
64414
64415diff --git a/ipc/sem.c b/ipc/sem.c
64416index 5215a81..cfc0cac 100644
64417--- a/ipc/sem.c
64418+++ b/ipc/sem.c
64419@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
64420 return 0;
64421 }
64422
64423+static struct ipc_ops sem_ops = {
64424+ .getnew = newary,
64425+ .associate = sem_security,
64426+ .more_checks = sem_more_checks
64427+};
64428+
64429 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64430 {
64431 struct ipc_namespace *ns;
64432- struct ipc_ops sem_ops;
64433 struct ipc_params sem_params;
64434
64435 ns = current->nsproxy->ipc_ns;
64436@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64437 if (nsems < 0 || nsems > ns->sc_semmsl)
64438 return -EINVAL;
64439
64440- sem_ops.getnew = newary;
64441- sem_ops.associate = sem_security;
64442- sem_ops.more_checks = sem_more_checks;
64443-
64444 sem_params.key = key;
64445 sem_params.flg = semflg;
64446 sem_params.u.nsems = nsems;
64447diff --git a/ipc/shm.c b/ipc/shm.c
64448index 406c5b2..bc66d67 100644
64449--- a/ipc/shm.c
64450+++ b/ipc/shm.c
64451@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
64452 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
64453 #endif
64454
64455+#ifdef CONFIG_GRKERNSEC
64456+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64457+ const time_t shm_createtime, const uid_t cuid,
64458+ const int shmid);
64459+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64460+ const time_t shm_createtime);
64461+#endif
64462+
64463 void shm_init_ns(struct ipc_namespace *ns)
64464 {
64465 ns->shm_ctlmax = SHMMAX;
64466@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
64467 shp->shm_lprid = 0;
64468 shp->shm_atim = shp->shm_dtim = 0;
64469 shp->shm_ctim = get_seconds();
64470+#ifdef CONFIG_GRKERNSEC
64471+ {
64472+ struct timespec timeval;
64473+ do_posix_clock_monotonic_gettime(&timeval);
64474+
64475+ shp->shm_createtime = timeval.tv_sec;
64476+ }
64477+#endif
64478 shp->shm_segsz = size;
64479 shp->shm_nattch = 0;
64480 shp->shm_file = file;
64481@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
64482 return 0;
64483 }
64484
64485+static struct ipc_ops shm_ops = {
64486+ .getnew = newseg,
64487+ .associate = shm_security,
64488+ .more_checks = shm_more_checks
64489+};
64490+
64491 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
64492 {
64493 struct ipc_namespace *ns;
64494- struct ipc_ops shm_ops;
64495 struct ipc_params shm_params;
64496
64497 ns = current->nsproxy->ipc_ns;
64498
64499- shm_ops.getnew = newseg;
64500- shm_ops.associate = shm_security;
64501- shm_ops.more_checks = shm_more_checks;
64502-
64503 shm_params.key = key;
64504 shm_params.flg = shmflg;
64505 shm_params.u.size = size;
64506@@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64507 f_mode = FMODE_READ | FMODE_WRITE;
64508 }
64509 if (shmflg & SHM_EXEC) {
64510+
64511+#ifdef CONFIG_PAX_MPROTECT
64512+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
64513+ goto out;
64514+#endif
64515+
64516 prot |= PROT_EXEC;
64517 acc_mode |= S_IXUGO;
64518 }
64519@@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64520 if (err)
64521 goto out_unlock;
64522
64523+#ifdef CONFIG_GRKERNSEC
64524+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
64525+ shp->shm_perm.cuid, shmid) ||
64526+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
64527+ err = -EACCES;
64528+ goto out_unlock;
64529+ }
64530+#endif
64531+
64532 path = shp->shm_file->f_path;
64533 path_get(&path);
64534 shp->shm_nattch++;
64535+#ifdef CONFIG_GRKERNSEC
64536+ shp->shm_lapid = current->pid;
64537+#endif
64538 size = i_size_read(path.dentry->d_inode);
64539 shm_unlock(shp);
64540
64541diff --git a/kernel/acct.c b/kernel/acct.c
64542index 02e6167..54824f7 100644
64543--- a/kernel/acct.c
64544+++ b/kernel/acct.c
64545@@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
64546 */
64547 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
64548 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
64549- file->f_op->write(file, (char *)&ac,
64550+ file->f_op->write(file, (char __force_user *)&ac,
64551 sizeof(acct_t), &file->f_pos);
64552 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
64553 set_fs(fs);
64554diff --git a/kernel/audit.c b/kernel/audit.c
64555index 1c7f2c6..9ba5359 100644
64556--- a/kernel/audit.c
64557+++ b/kernel/audit.c
64558@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
64559 3) suppressed due to audit_rate_limit
64560 4) suppressed due to audit_backlog_limit
64561 */
64562-static atomic_t audit_lost = ATOMIC_INIT(0);
64563+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
64564
64565 /* The netlink socket. */
64566 static struct sock *audit_sock;
64567@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
64568 unsigned long now;
64569 int print;
64570
64571- atomic_inc(&audit_lost);
64572+ atomic_inc_unchecked(&audit_lost);
64573
64574 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
64575
64576@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
64577 printk(KERN_WARNING
64578 "audit: audit_lost=%d audit_rate_limit=%d "
64579 "audit_backlog_limit=%d\n",
64580- atomic_read(&audit_lost),
64581+ atomic_read_unchecked(&audit_lost),
64582 audit_rate_limit,
64583 audit_backlog_limit);
64584 audit_panic(message);
64585@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
64586 status_set.pid = audit_pid;
64587 status_set.rate_limit = audit_rate_limit;
64588 status_set.backlog_limit = audit_backlog_limit;
64589- status_set.lost = atomic_read(&audit_lost);
64590+ status_set.lost = atomic_read_unchecked(&audit_lost);
64591 status_set.backlog = skb_queue_len(&audit_skb_queue);
64592 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
64593 &status_set, sizeof(status_set));
64594diff --git a/kernel/auditsc.c b/kernel/auditsc.c
64595index af1de0f..06dfe57 100644
64596--- a/kernel/auditsc.c
64597+++ b/kernel/auditsc.c
64598@@ -2288,7 +2288,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
64599 }
64600
64601 /* global counter which is incremented every time something logs in */
64602-static atomic_t session_id = ATOMIC_INIT(0);
64603+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
64604
64605 /**
64606 * audit_set_loginuid - set current task's audit_context loginuid
64607@@ -2312,7 +2312,7 @@ int audit_set_loginuid(uid_t loginuid)
64608 return -EPERM;
64609 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
64610
64611- sessionid = atomic_inc_return(&session_id);
64612+ sessionid = atomic_inc_return_unchecked(&session_id);
64613 if (context && context->in_syscall) {
64614 struct audit_buffer *ab;
64615
64616diff --git a/kernel/capability.c b/kernel/capability.c
64617index 3f1adb6..c564db0 100644
64618--- a/kernel/capability.c
64619+++ b/kernel/capability.c
64620@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
64621 * before modification is attempted and the application
64622 * fails.
64623 */
64624+ if (tocopy > ARRAY_SIZE(kdata))
64625+ return -EFAULT;
64626+
64627 if (copy_to_user(dataptr, kdata, tocopy
64628 * sizeof(struct __user_cap_data_struct))) {
64629 return -EFAULT;
64630@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
64631 int ret;
64632
64633 rcu_read_lock();
64634- ret = security_capable(__task_cred(t), ns, cap);
64635+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
64636+ gr_task_is_capable(t, __task_cred(t), cap);
64637 rcu_read_unlock();
64638
64639- return (ret == 0);
64640+ return ret;
64641 }
64642
64643 /**
64644@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
64645 int ret;
64646
64647 rcu_read_lock();
64648- ret = security_capable_noaudit(__task_cred(t), ns, cap);
64649+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
64650 rcu_read_unlock();
64651
64652- return (ret == 0);
64653+ return ret;
64654 }
64655
64656 /**
64657@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
64658 BUG();
64659 }
64660
64661- if (security_capable(current_cred(), ns, cap) == 0) {
64662+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
64663 current->flags |= PF_SUPERPRIV;
64664 return true;
64665 }
64666@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
64667 }
64668 EXPORT_SYMBOL(ns_capable);
64669
64670+bool ns_capable_nolog(struct user_namespace *ns, int cap)
64671+{
64672+ if (unlikely(!cap_valid(cap))) {
64673+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
64674+ BUG();
64675+ }
64676+
64677+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
64678+ current->flags |= PF_SUPERPRIV;
64679+ return true;
64680+ }
64681+ return false;
64682+}
64683+EXPORT_SYMBOL(ns_capable_nolog);
64684+
64685 /**
64686 * capable - Determine if the current task has a superior capability in effect
64687 * @cap: The capability to be tested for
64688@@ -408,6 +427,12 @@ bool capable(int cap)
64689 }
64690 EXPORT_SYMBOL(capable);
64691
64692+bool capable_nolog(int cap)
64693+{
64694+ return ns_capable_nolog(&init_user_ns, cap);
64695+}
64696+EXPORT_SYMBOL(capable_nolog);
64697+
64698 /**
64699 * nsown_capable - Check superior capability to one's own user_ns
64700 * @cap: The capability in question
64701diff --git a/kernel/compat.c b/kernel/compat.c
64702index d2c67aa..a629b2e 100644
64703--- a/kernel/compat.c
64704+++ b/kernel/compat.c
64705@@ -13,6 +13,7 @@
64706
64707 #include <linux/linkage.h>
64708 #include <linux/compat.h>
64709+#include <linux/module.h>
64710 #include <linux/errno.h>
64711 #include <linux/time.h>
64712 #include <linux/signal.h>
64713@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
64714 mm_segment_t oldfs;
64715 long ret;
64716
64717- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
64718+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
64719 oldfs = get_fs();
64720 set_fs(KERNEL_DS);
64721 ret = hrtimer_nanosleep_restart(restart);
64722@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
64723 oldfs = get_fs();
64724 set_fs(KERNEL_DS);
64725 ret = hrtimer_nanosleep(&tu,
64726- rmtp ? (struct timespec __user *)&rmt : NULL,
64727+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
64728 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
64729 set_fs(oldfs);
64730
64731@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
64732 mm_segment_t old_fs = get_fs();
64733
64734 set_fs(KERNEL_DS);
64735- ret = sys_sigpending((old_sigset_t __user *) &s);
64736+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
64737 set_fs(old_fs);
64738 if (ret == 0)
64739 ret = put_user(s, set);
64740@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
64741 mm_segment_t old_fs = get_fs();
64742
64743 set_fs(KERNEL_DS);
64744- ret = sys_old_getrlimit(resource, &r);
64745+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
64746 set_fs(old_fs);
64747
64748 if (!ret) {
64749@@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
64750 mm_segment_t old_fs = get_fs();
64751
64752 set_fs(KERNEL_DS);
64753- ret = sys_getrusage(who, (struct rusage __user *) &r);
64754+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
64755 set_fs(old_fs);
64756
64757 if (ret)
64758@@ -550,8 +551,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
64759 set_fs (KERNEL_DS);
64760 ret = sys_wait4(pid,
64761 (stat_addr ?
64762- (unsigned int __user *) &status : NULL),
64763- options, (struct rusage __user *) &r);
64764+ (unsigned int __force_user *) &status : NULL),
64765+ options, (struct rusage __force_user *) &r);
64766 set_fs (old_fs);
64767
64768 if (ret > 0) {
64769@@ -576,8 +577,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
64770 memset(&info, 0, sizeof(info));
64771
64772 set_fs(KERNEL_DS);
64773- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
64774- uru ? (struct rusage __user *)&ru : NULL);
64775+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
64776+ uru ? (struct rusage __force_user *)&ru : NULL);
64777 set_fs(old_fs);
64778
64779 if ((ret < 0) || (info.si_signo == 0))
64780@@ -707,8 +708,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
64781 oldfs = get_fs();
64782 set_fs(KERNEL_DS);
64783 err = sys_timer_settime(timer_id, flags,
64784- (struct itimerspec __user *) &newts,
64785- (struct itimerspec __user *) &oldts);
64786+ (struct itimerspec __force_user *) &newts,
64787+ (struct itimerspec __force_user *) &oldts);
64788 set_fs(oldfs);
64789 if (!err && old && put_compat_itimerspec(old, &oldts))
64790 return -EFAULT;
64791@@ -725,7 +726,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
64792 oldfs = get_fs();
64793 set_fs(KERNEL_DS);
64794 err = sys_timer_gettime(timer_id,
64795- (struct itimerspec __user *) &ts);
64796+ (struct itimerspec __force_user *) &ts);
64797 set_fs(oldfs);
64798 if (!err && put_compat_itimerspec(setting, &ts))
64799 return -EFAULT;
64800@@ -744,7 +745,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
64801 oldfs = get_fs();
64802 set_fs(KERNEL_DS);
64803 err = sys_clock_settime(which_clock,
64804- (struct timespec __user *) &ts);
64805+ (struct timespec __force_user *) &ts);
64806 set_fs(oldfs);
64807 return err;
64808 }
64809@@ -759,7 +760,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
64810 oldfs = get_fs();
64811 set_fs(KERNEL_DS);
64812 err = sys_clock_gettime(which_clock,
64813- (struct timespec __user *) &ts);
64814+ (struct timespec __force_user *) &ts);
64815 set_fs(oldfs);
64816 if (!err && put_compat_timespec(&ts, tp))
64817 return -EFAULT;
64818@@ -779,7 +780,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
64819
64820 oldfs = get_fs();
64821 set_fs(KERNEL_DS);
64822- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
64823+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
64824 set_fs(oldfs);
64825
64826 err = compat_put_timex(utp, &txc);
64827@@ -799,7 +800,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
64828 oldfs = get_fs();
64829 set_fs(KERNEL_DS);
64830 err = sys_clock_getres(which_clock,
64831- (struct timespec __user *) &ts);
64832+ (struct timespec __force_user *) &ts);
64833 set_fs(oldfs);
64834 if (!err && tp && put_compat_timespec(&ts, tp))
64835 return -EFAULT;
64836@@ -811,9 +812,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
64837 long err;
64838 mm_segment_t oldfs;
64839 struct timespec tu;
64840- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
64841+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
64842
64843- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
64844+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
64845 oldfs = get_fs();
64846 set_fs(KERNEL_DS);
64847 err = clock_nanosleep_restart(restart);
64848@@ -845,8 +846,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
64849 oldfs = get_fs();
64850 set_fs(KERNEL_DS);
64851 err = sys_clock_nanosleep(which_clock, flags,
64852- (struct timespec __user *) &in,
64853- (struct timespec __user *) &out);
64854+ (struct timespec __force_user *) &in,
64855+ (struct timespec __force_user *) &out);
64856 set_fs(oldfs);
64857
64858 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
64859diff --git a/kernel/configs.c b/kernel/configs.c
64860index 42e8fa0..9e7406b 100644
64861--- a/kernel/configs.c
64862+++ b/kernel/configs.c
64863@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
64864 struct proc_dir_entry *entry;
64865
64866 /* create the current config file */
64867+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
64868+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
64869+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
64870+ &ikconfig_file_ops);
64871+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64872+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
64873+ &ikconfig_file_ops);
64874+#endif
64875+#else
64876 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
64877 &ikconfig_file_ops);
64878+#endif
64879+
64880 if (!entry)
64881 return -ENOMEM;
64882
64883diff --git a/kernel/cred.c b/kernel/cred.c
64884index e70683d..27761b6 100644
64885--- a/kernel/cred.c
64886+++ b/kernel/cred.c
64887@@ -205,6 +205,15 @@ void exit_creds(struct task_struct *tsk)
64888 validate_creds(cred);
64889 put_cred(cred);
64890 }
64891+
64892+#ifdef CONFIG_GRKERNSEC_SETXID
64893+ cred = (struct cred *) tsk->delayed_cred;
64894+ if (cred) {
64895+ tsk->delayed_cred = NULL;
64896+ validate_creds(cred);
64897+ put_cred(cred);
64898+ }
64899+#endif
64900 }
64901
64902 /**
64903@@ -473,7 +482,7 @@ error_put:
64904 * Always returns 0 thus allowing this function to be tail-called at the end
64905 * of, say, sys_setgid().
64906 */
64907-int commit_creds(struct cred *new)
64908+static int __commit_creds(struct cred *new)
64909 {
64910 struct task_struct *task = current;
64911 const struct cred *old = task->real_cred;
64912@@ -492,6 +501,8 @@ int commit_creds(struct cred *new)
64913
64914 get_cred(new); /* we will require a ref for the subj creds too */
64915
64916+ gr_set_role_label(task, new->uid, new->gid);
64917+
64918 /* dumpability changes */
64919 if (old->euid != new->euid ||
64920 old->egid != new->egid ||
64921@@ -541,6 +552,101 @@ int commit_creds(struct cred *new)
64922 put_cred(old);
64923 return 0;
64924 }
64925+#ifdef CONFIG_GRKERNSEC_SETXID
64926+extern int set_user(struct cred *new);
64927+
64928+void gr_delayed_cred_worker(void)
64929+{
64930+ const struct cred *new = current->delayed_cred;
64931+ struct cred *ncred;
64932+
64933+ current->delayed_cred = NULL;
64934+
64935+ if (current_uid() && new != NULL) {
64936+ // from doing get_cred on it when queueing this
64937+ put_cred(new);
64938+ return;
64939+ } else if (new == NULL)
64940+ return;
64941+
64942+ ncred = prepare_creds();
64943+ if (!ncred)
64944+ goto die;
64945+ // uids
64946+ ncred->uid = new->uid;
64947+ ncred->euid = new->euid;
64948+ ncred->suid = new->suid;
64949+ ncred->fsuid = new->fsuid;
64950+ // gids
64951+ ncred->gid = new->gid;
64952+ ncred->egid = new->egid;
64953+ ncred->sgid = new->sgid;
64954+ ncred->fsgid = new->fsgid;
64955+ // groups
64956+ if (set_groups(ncred, new->group_info) < 0) {
64957+ abort_creds(ncred);
64958+ goto die;
64959+ }
64960+ // caps
64961+ ncred->securebits = new->securebits;
64962+ ncred->cap_inheritable = new->cap_inheritable;
64963+ ncred->cap_permitted = new->cap_permitted;
64964+ ncred->cap_effective = new->cap_effective;
64965+ ncred->cap_bset = new->cap_bset;
64966+
64967+ if (set_user(ncred)) {
64968+ abort_creds(ncred);
64969+ goto die;
64970+ }
64971+
64972+ // from doing get_cred on it when queueing this
64973+ put_cred(new);
64974+
64975+ __commit_creds(ncred);
64976+ return;
64977+die:
64978+ // from doing get_cred on it when queueing this
64979+ put_cred(new);
64980+ do_group_exit(SIGKILL);
64981+}
64982+#endif
64983+
64984+int commit_creds(struct cred *new)
64985+{
64986+#ifdef CONFIG_GRKERNSEC_SETXID
64987+ int ret;
64988+ int schedule_it = 0;
64989+ struct task_struct *t;
64990+
64991+ /* we won't get called with tasklist_lock held for writing
64992+ and interrupts disabled as the cred struct in that case is
64993+ init_cred
64994+ */
64995+ if (grsec_enable_setxid && !current_is_single_threaded() &&
64996+ !current_uid() && new->uid) {
64997+ schedule_it = 1;
64998+ }
64999+ ret = __commit_creds(new);
65000+ if (schedule_it) {
65001+ rcu_read_lock();
65002+ read_lock(&tasklist_lock);
65003+ for (t = next_thread(current); t != current;
65004+ t = next_thread(t)) {
65005+ if (t->delayed_cred == NULL) {
65006+ t->delayed_cred = get_cred(new);
65007+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
65008+ set_tsk_need_resched(t);
65009+ }
65010+ }
65011+ read_unlock(&tasklist_lock);
65012+ rcu_read_unlock();
65013+ }
65014+ return ret;
65015+#else
65016+ return __commit_creds(new);
65017+#endif
65018+}
65019+
65020 EXPORT_SYMBOL(commit_creds);
65021
65022 /**
65023diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
65024index 0557f24..1a00d9a 100644
65025--- a/kernel/debug/debug_core.c
65026+++ b/kernel/debug/debug_core.c
65027@@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
65028 */
65029 static atomic_t masters_in_kgdb;
65030 static atomic_t slaves_in_kgdb;
65031-static atomic_t kgdb_break_tasklet_var;
65032+static atomic_unchecked_t kgdb_break_tasklet_var;
65033 atomic_t kgdb_setting_breakpoint;
65034
65035 struct task_struct *kgdb_usethread;
65036@@ -132,7 +132,7 @@ int kgdb_single_step;
65037 static pid_t kgdb_sstep_pid;
65038
65039 /* to keep track of the CPU which is doing the single stepping*/
65040-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65041+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65042
65043 /*
65044 * If you are debugging a problem where roundup (the collection of
65045@@ -540,7 +540,7 @@ return_normal:
65046 * kernel will only try for the value of sstep_tries before
65047 * giving up and continuing on.
65048 */
65049- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
65050+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
65051 (kgdb_info[cpu].task &&
65052 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
65053 atomic_set(&kgdb_active, -1);
65054@@ -634,8 +634,8 @@ cpu_master_loop:
65055 }
65056
65057 kgdb_restore:
65058- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
65059- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
65060+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
65061+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
65062 if (kgdb_info[sstep_cpu].task)
65063 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
65064 else
65065@@ -861,18 +861,18 @@ static void kgdb_unregister_callbacks(void)
65066 static void kgdb_tasklet_bpt(unsigned long ing)
65067 {
65068 kgdb_breakpoint();
65069- atomic_set(&kgdb_break_tasklet_var, 0);
65070+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
65071 }
65072
65073 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
65074
65075 void kgdb_schedule_breakpoint(void)
65076 {
65077- if (atomic_read(&kgdb_break_tasklet_var) ||
65078+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
65079 atomic_read(&kgdb_active) != -1 ||
65080 atomic_read(&kgdb_setting_breakpoint))
65081 return;
65082- atomic_inc(&kgdb_break_tasklet_var);
65083+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
65084 tasklet_schedule(&kgdb_tasklet_breakpoint);
65085 }
65086 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
65087diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
65088index 67b847d..93834dd 100644
65089--- a/kernel/debug/kdb/kdb_main.c
65090+++ b/kernel/debug/kdb/kdb_main.c
65091@@ -1983,7 +1983,7 @@ static int kdb_lsmod(int argc, const char **argv)
65092 list_for_each_entry(mod, kdb_modules, list) {
65093
65094 kdb_printf("%-20s%8u 0x%p ", mod->name,
65095- mod->core_size, (void *)mod);
65096+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
65097 #ifdef CONFIG_MODULE_UNLOAD
65098 kdb_printf("%4ld ", module_refcount(mod));
65099 #endif
65100@@ -1993,7 +1993,7 @@ static int kdb_lsmod(int argc, const char **argv)
65101 kdb_printf(" (Loading)");
65102 else
65103 kdb_printf(" (Live)");
65104- kdb_printf(" 0x%p", mod->module_core);
65105+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
65106
65107 #ifdef CONFIG_MODULE_UNLOAD
65108 {
65109diff --git a/kernel/events/core.c b/kernel/events/core.c
65110index fd126f8..70b755b 100644
65111--- a/kernel/events/core.c
65112+++ b/kernel/events/core.c
65113@@ -181,7 +181,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
65114 return 0;
65115 }
65116
65117-static atomic64_t perf_event_id;
65118+static atomic64_unchecked_t perf_event_id;
65119
65120 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
65121 enum event_type_t event_type);
65122@@ -2659,7 +2659,7 @@ static void __perf_event_read(void *info)
65123
65124 static inline u64 perf_event_count(struct perf_event *event)
65125 {
65126- return local64_read(&event->count) + atomic64_read(&event->child_count);
65127+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
65128 }
65129
65130 static u64 perf_event_read(struct perf_event *event)
65131@@ -2983,9 +2983,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
65132 mutex_lock(&event->child_mutex);
65133 total += perf_event_read(event);
65134 *enabled += event->total_time_enabled +
65135- atomic64_read(&event->child_total_time_enabled);
65136+ atomic64_read_unchecked(&event->child_total_time_enabled);
65137 *running += event->total_time_running +
65138- atomic64_read(&event->child_total_time_running);
65139+ atomic64_read_unchecked(&event->child_total_time_running);
65140
65141 list_for_each_entry(child, &event->child_list, child_list) {
65142 total += perf_event_read(child);
65143@@ -3393,10 +3393,10 @@ void perf_event_update_userpage(struct perf_event *event)
65144 userpg->offset -= local64_read(&event->hw.prev_count);
65145
65146 userpg->time_enabled = enabled +
65147- atomic64_read(&event->child_total_time_enabled);
65148+ atomic64_read_unchecked(&event->child_total_time_enabled);
65149
65150 userpg->time_running = running +
65151- atomic64_read(&event->child_total_time_running);
65152+ atomic64_read_unchecked(&event->child_total_time_running);
65153
65154 arch_perf_update_userpage(userpg, now);
65155
65156@@ -3829,11 +3829,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
65157 values[n++] = perf_event_count(event);
65158 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
65159 values[n++] = enabled +
65160- atomic64_read(&event->child_total_time_enabled);
65161+ atomic64_read_unchecked(&event->child_total_time_enabled);
65162 }
65163 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
65164 values[n++] = running +
65165- atomic64_read(&event->child_total_time_running);
65166+ atomic64_read_unchecked(&event->child_total_time_running);
65167 }
65168 if (read_format & PERF_FORMAT_ID)
65169 values[n++] = primary_event_id(event);
65170@@ -4511,12 +4511,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
65171 * need to add enough zero bytes after the string to handle
65172 * the 64bit alignment we do later.
65173 */
65174- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
65175+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
65176 if (!buf) {
65177 name = strncpy(tmp, "//enomem", sizeof(tmp));
65178 goto got_name;
65179 }
65180- name = d_path(&file->f_path, buf, PATH_MAX);
65181+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
65182 if (IS_ERR(name)) {
65183 name = strncpy(tmp, "//toolong", sizeof(tmp));
65184 goto got_name;
65185@@ -5929,7 +5929,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
65186 event->parent = parent_event;
65187
65188 event->ns = get_pid_ns(current->nsproxy->pid_ns);
65189- event->id = atomic64_inc_return(&perf_event_id);
65190+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
65191
65192 event->state = PERF_EVENT_STATE_INACTIVE;
65193
65194@@ -6491,10 +6491,10 @@ static void sync_child_event(struct perf_event *child_event,
65195 /*
65196 * Add back the child's count to the parent's count:
65197 */
65198- atomic64_add(child_val, &parent_event->child_count);
65199- atomic64_add(child_event->total_time_enabled,
65200+ atomic64_add_unchecked(child_val, &parent_event->child_count);
65201+ atomic64_add_unchecked(child_event->total_time_enabled,
65202 &parent_event->child_total_time_enabled);
65203- atomic64_add(child_event->total_time_running,
65204+ atomic64_add_unchecked(child_event->total_time_running,
65205 &parent_event->child_total_time_running);
65206
65207 /*
65208diff --git a/kernel/exit.c b/kernel/exit.c
65209index d8bd3b42..26bd8dc 100644
65210--- a/kernel/exit.c
65211+++ b/kernel/exit.c
65212@@ -59,6 +59,10 @@
65213 #include <asm/pgtable.h>
65214 #include <asm/mmu_context.h>
65215
65216+#ifdef CONFIG_GRKERNSEC
65217+extern rwlock_t grsec_exec_file_lock;
65218+#endif
65219+
65220 static void exit_mm(struct task_struct * tsk);
65221
65222 static void __unhash_process(struct task_struct *p, bool group_dead)
65223@@ -170,6 +174,10 @@ void release_task(struct task_struct * p)
65224 struct task_struct *leader;
65225 int zap_leader;
65226 repeat:
65227+#ifdef CONFIG_NET
65228+ gr_del_task_from_ip_table(p);
65229+#endif
65230+
65231 /* don't need to get the RCU readlock here - the process is dead and
65232 * can't be modifying its own credentials. But shut RCU-lockdep up */
65233 rcu_read_lock();
65234@@ -382,7 +390,7 @@ int allow_signal(int sig)
65235 * know it'll be handled, so that they don't get converted to
65236 * SIGKILL or just silently dropped.
65237 */
65238- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
65239+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
65240 recalc_sigpending();
65241 spin_unlock_irq(&current->sighand->siglock);
65242 return 0;
65243@@ -418,6 +426,17 @@ void daemonize(const char *name, ...)
65244 vsnprintf(current->comm, sizeof(current->comm), name, args);
65245 va_end(args);
65246
65247+#ifdef CONFIG_GRKERNSEC
65248+ write_lock(&grsec_exec_file_lock);
65249+ if (current->exec_file) {
65250+ fput(current->exec_file);
65251+ current->exec_file = NULL;
65252+ }
65253+ write_unlock(&grsec_exec_file_lock);
65254+#endif
65255+
65256+ gr_set_kernel_label(current);
65257+
65258 /*
65259 * If we were started as result of loading a module, close all of the
65260 * user space pages. We don't need them, and if we didn't close them
65261@@ -900,6 +919,8 @@ void do_exit(long code)
65262 struct task_struct *tsk = current;
65263 int group_dead;
65264
65265+ set_fs(USER_DS);
65266+
65267 profile_task_exit(tsk);
65268
65269 WARN_ON(blk_needs_flush_plug(tsk));
65270@@ -916,7 +937,6 @@ void do_exit(long code)
65271 * mm_release()->clear_child_tid() from writing to a user-controlled
65272 * kernel address.
65273 */
65274- set_fs(USER_DS);
65275
65276 ptrace_event(PTRACE_EVENT_EXIT, code);
65277
65278@@ -977,6 +997,9 @@ void do_exit(long code)
65279 tsk->exit_code = code;
65280 taskstats_exit(tsk, group_dead);
65281
65282+ gr_acl_handle_psacct(tsk, code);
65283+ gr_acl_handle_exit();
65284+
65285 exit_mm(tsk);
65286
65287 if (group_dead)
65288@@ -1093,7 +1116,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
65289 * Take down every thread in the group. This is called by fatal signals
65290 * as well as by sys_exit_group (below).
65291 */
65292-void
65293+__noreturn void
65294 do_group_exit(int exit_code)
65295 {
65296 struct signal_struct *sig = current->signal;
65297diff --git a/kernel/fork.c b/kernel/fork.c
65298index 8163333..efb4692 100644
65299--- a/kernel/fork.c
65300+++ b/kernel/fork.c
65301@@ -286,7 +286,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
65302 *stackend = STACK_END_MAGIC; /* for overflow detection */
65303
65304 #ifdef CONFIG_CC_STACKPROTECTOR
65305- tsk->stack_canary = get_random_int();
65306+ tsk->stack_canary = pax_get_random_long();
65307 #endif
65308
65309 /*
65310@@ -310,13 +310,78 @@ out:
65311 }
65312
65313 #ifdef CONFIG_MMU
65314+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
65315+{
65316+ struct vm_area_struct *tmp;
65317+ unsigned long charge;
65318+ struct mempolicy *pol;
65319+ struct file *file;
65320+
65321+ charge = 0;
65322+ if (mpnt->vm_flags & VM_ACCOUNT) {
65323+ unsigned long len;
65324+ len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65325+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
65326+ goto fail_nomem;
65327+ charge = len;
65328+ }
65329+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65330+ if (!tmp)
65331+ goto fail_nomem;
65332+ *tmp = *mpnt;
65333+ tmp->vm_mm = mm;
65334+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
65335+ pol = mpol_dup(vma_policy(mpnt));
65336+ if (IS_ERR(pol))
65337+ goto fail_nomem_policy;
65338+ vma_set_policy(tmp, pol);
65339+ if (anon_vma_fork(tmp, mpnt))
65340+ goto fail_nomem_anon_vma_fork;
65341+ tmp->vm_flags &= ~VM_LOCKED;
65342+ tmp->vm_next = tmp->vm_prev = NULL;
65343+ tmp->vm_mirror = NULL;
65344+ file = tmp->vm_file;
65345+ if (file) {
65346+ struct inode *inode = file->f_path.dentry->d_inode;
65347+ struct address_space *mapping = file->f_mapping;
65348+
65349+ get_file(file);
65350+ if (tmp->vm_flags & VM_DENYWRITE)
65351+ atomic_dec(&inode->i_writecount);
65352+ mutex_lock(&mapping->i_mmap_mutex);
65353+ if (tmp->vm_flags & VM_SHARED)
65354+ mapping->i_mmap_writable++;
65355+ flush_dcache_mmap_lock(mapping);
65356+ /* insert tmp into the share list, just after mpnt */
65357+ vma_prio_tree_add(tmp, mpnt);
65358+ flush_dcache_mmap_unlock(mapping);
65359+ mutex_unlock(&mapping->i_mmap_mutex);
65360+ }
65361+
65362+ /*
65363+ * Clear hugetlb-related page reserves for children. This only
65364+ * affects MAP_PRIVATE mappings. Faults generated by the child
65365+ * are not guaranteed to succeed, even if read-only
65366+ */
65367+ if (is_vm_hugetlb_page(tmp))
65368+ reset_vma_resv_huge_pages(tmp);
65369+
65370+ return tmp;
65371+
65372+fail_nomem_anon_vma_fork:
65373+ mpol_put(pol);
65374+fail_nomem_policy:
65375+ kmem_cache_free(vm_area_cachep, tmp);
65376+fail_nomem:
65377+ vm_unacct_memory(charge);
65378+ return NULL;
65379+}
65380+
65381 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65382 {
65383 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
65384 struct rb_node **rb_link, *rb_parent;
65385 int retval;
65386- unsigned long charge;
65387- struct mempolicy *pol;
65388
65389 down_write(&oldmm->mmap_sem);
65390 flush_cache_dup_mm(oldmm);
65391@@ -328,8 +393,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65392 mm->locked_vm = 0;
65393 mm->mmap = NULL;
65394 mm->mmap_cache = NULL;
65395- mm->free_area_cache = oldmm->mmap_base;
65396- mm->cached_hole_size = ~0UL;
65397+ mm->free_area_cache = oldmm->free_area_cache;
65398+ mm->cached_hole_size = oldmm->cached_hole_size;
65399 mm->map_count = 0;
65400 cpumask_clear(mm_cpumask(mm));
65401 mm->mm_rb = RB_ROOT;
65402@@ -345,8 +410,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65403
65404 prev = NULL;
65405 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
65406- struct file *file;
65407-
65408 if (mpnt->vm_flags & VM_DONTCOPY) {
65409 long pages = vma_pages(mpnt);
65410 mm->total_vm -= pages;
65411@@ -354,54 +417,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65412 -pages);
65413 continue;
65414 }
65415- charge = 0;
65416- if (mpnt->vm_flags & VM_ACCOUNT) {
65417- unsigned long len;
65418- len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65419- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
65420- goto fail_nomem;
65421- charge = len;
65422+ tmp = dup_vma(mm, oldmm, mpnt);
65423+ if (!tmp) {
65424+ retval = -ENOMEM;
65425+ goto out;
65426 }
65427- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65428- if (!tmp)
65429- goto fail_nomem;
65430- *tmp = *mpnt;
65431- INIT_LIST_HEAD(&tmp->anon_vma_chain);
65432- pol = mpol_dup(vma_policy(mpnt));
65433- retval = PTR_ERR(pol);
65434- if (IS_ERR(pol))
65435- goto fail_nomem_policy;
65436- vma_set_policy(tmp, pol);
65437- tmp->vm_mm = mm;
65438- if (anon_vma_fork(tmp, mpnt))
65439- goto fail_nomem_anon_vma_fork;
65440- tmp->vm_flags &= ~VM_LOCKED;
65441- tmp->vm_next = tmp->vm_prev = NULL;
65442- file = tmp->vm_file;
65443- if (file) {
65444- struct inode *inode = file->f_path.dentry->d_inode;
65445- struct address_space *mapping = file->f_mapping;
65446-
65447- get_file(file);
65448- if (tmp->vm_flags & VM_DENYWRITE)
65449- atomic_dec(&inode->i_writecount);
65450- mutex_lock(&mapping->i_mmap_mutex);
65451- if (tmp->vm_flags & VM_SHARED)
65452- mapping->i_mmap_writable++;
65453- flush_dcache_mmap_lock(mapping);
65454- /* insert tmp into the share list, just after mpnt */
65455- vma_prio_tree_add(tmp, mpnt);
65456- flush_dcache_mmap_unlock(mapping);
65457- mutex_unlock(&mapping->i_mmap_mutex);
65458- }
65459-
65460- /*
65461- * Clear hugetlb-related page reserves for children. This only
65462- * affects MAP_PRIVATE mappings. Faults generated by the child
65463- * are not guaranteed to succeed, even if read-only
65464- */
65465- if (is_vm_hugetlb_page(tmp))
65466- reset_vma_resv_huge_pages(tmp);
65467
65468 /*
65469 * Link in the new vma and copy the page table entries.
65470@@ -424,6 +444,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65471 if (retval)
65472 goto out;
65473 }
65474+
65475+#ifdef CONFIG_PAX_SEGMEXEC
65476+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
65477+ struct vm_area_struct *mpnt_m;
65478+
65479+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
65480+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
65481+
65482+ if (!mpnt->vm_mirror)
65483+ continue;
65484+
65485+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
65486+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
65487+ mpnt->vm_mirror = mpnt_m;
65488+ } else {
65489+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
65490+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
65491+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
65492+ mpnt->vm_mirror->vm_mirror = mpnt;
65493+ }
65494+ }
65495+ BUG_ON(mpnt_m);
65496+ }
65497+#endif
65498+
65499 /* a new mm has just been created */
65500 arch_dup_mmap(oldmm, mm);
65501 retval = 0;
65502@@ -432,14 +477,6 @@ out:
65503 flush_tlb_mm(oldmm);
65504 up_write(&oldmm->mmap_sem);
65505 return retval;
65506-fail_nomem_anon_vma_fork:
65507- mpol_put(pol);
65508-fail_nomem_policy:
65509- kmem_cache_free(vm_area_cachep, tmp);
65510-fail_nomem:
65511- retval = -ENOMEM;
65512- vm_unacct_memory(charge);
65513- goto out;
65514 }
65515
65516 static inline int mm_alloc_pgd(struct mm_struct *mm)
65517@@ -676,8 +713,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
65518 return ERR_PTR(err);
65519
65520 mm = get_task_mm(task);
65521- if (mm && mm != current->mm &&
65522- !ptrace_may_access(task, mode)) {
65523+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
65524+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
65525 mmput(mm);
65526 mm = ERR_PTR(-EACCES);
65527 }
65528@@ -899,13 +936,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
65529 spin_unlock(&fs->lock);
65530 return -EAGAIN;
65531 }
65532- fs->users++;
65533+ atomic_inc(&fs->users);
65534 spin_unlock(&fs->lock);
65535 return 0;
65536 }
65537 tsk->fs = copy_fs_struct(fs);
65538 if (!tsk->fs)
65539 return -ENOMEM;
65540+ gr_set_chroot_entries(tsk, &tsk->fs->root);
65541 return 0;
65542 }
65543
65544@@ -1172,6 +1210,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65545 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
65546 #endif
65547 retval = -EAGAIN;
65548+
65549+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
65550+
65551 if (atomic_read(&p->real_cred->user->processes) >=
65552 task_rlimit(p, RLIMIT_NPROC)) {
65553 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
65554@@ -1328,6 +1369,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65555 if (clone_flags & CLONE_THREAD)
65556 p->tgid = current->tgid;
65557
65558+ gr_copy_label(p);
65559+
65560 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
65561 /*
65562 * Clear TID on mm_release()?
65563@@ -1502,6 +1545,8 @@ bad_fork_cleanup_count:
65564 bad_fork_free:
65565 free_task(p);
65566 fork_out:
65567+ gr_log_forkfail(retval);
65568+
65569 return ERR_PTR(retval);
65570 }
65571
65572@@ -1602,6 +1647,8 @@ long do_fork(unsigned long clone_flags,
65573 if (clone_flags & CLONE_PARENT_SETTID)
65574 put_user(nr, parent_tidptr);
65575
65576+ gr_handle_brute_check();
65577+
65578 if (clone_flags & CLONE_VFORK) {
65579 p->vfork_done = &vfork;
65580 init_completion(&vfork);
65581@@ -1700,7 +1747,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
65582 return 0;
65583
65584 /* don't need lock here; in the worst case we'll do useless copy */
65585- if (fs->users == 1)
65586+ if (atomic_read(&fs->users) == 1)
65587 return 0;
65588
65589 *new_fsp = copy_fs_struct(fs);
65590@@ -1789,7 +1836,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
65591 fs = current->fs;
65592 spin_lock(&fs->lock);
65593 current->fs = new_fs;
65594- if (--fs->users)
65595+ gr_set_chroot_entries(current, &current->fs->root);
65596+ if (atomic_dec_return(&fs->users))
65597 new_fs = NULL;
65598 else
65599 new_fs = fs;
65600diff --git a/kernel/futex.c b/kernel/futex.c
65601index e2b0fb9..db818ac 100644
65602--- a/kernel/futex.c
65603+++ b/kernel/futex.c
65604@@ -54,6 +54,7 @@
65605 #include <linux/mount.h>
65606 #include <linux/pagemap.h>
65607 #include <linux/syscalls.h>
65608+#include <linux/ptrace.h>
65609 #include <linux/signal.h>
65610 #include <linux/export.h>
65611 #include <linux/magic.h>
65612@@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
65613 struct page *page, *page_head;
65614 int err, ro = 0;
65615
65616+#ifdef CONFIG_PAX_SEGMEXEC
65617+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
65618+ return -EFAULT;
65619+#endif
65620+
65621 /*
65622 * The futex address must be "naturally" aligned.
65623 */
65624@@ -2711,6 +2717,7 @@ static int __init futex_init(void)
65625 {
65626 u32 curval;
65627 int i;
65628+ mm_segment_t oldfs;
65629
65630 /*
65631 * This will fail and we want it. Some arch implementations do
65632@@ -2722,8 +2729,11 @@ static int __init futex_init(void)
65633 * implementation, the non-functional ones will return
65634 * -ENOSYS.
65635 */
65636+ oldfs = get_fs();
65637+ set_fs(USER_DS);
65638 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
65639 futex_cmpxchg_enabled = 1;
65640+ set_fs(oldfs);
65641
65642 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
65643 plist_head_init(&futex_queues[i].chain);
65644diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
65645index 9b22d03..6295b62 100644
65646--- a/kernel/gcov/base.c
65647+++ b/kernel/gcov/base.c
65648@@ -102,11 +102,6 @@ void gcov_enable_events(void)
65649 }
65650
65651 #ifdef CONFIG_MODULES
65652-static inline int within(void *addr, void *start, unsigned long size)
65653-{
65654- return ((addr >= start) && (addr < start + size));
65655-}
65656-
65657 /* Update list and generate events when modules are unloaded. */
65658 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
65659 void *data)
65660@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
65661 prev = NULL;
65662 /* Remove entries located in module from linked list. */
65663 for (info = gcov_info_head; info; info = info->next) {
65664- if (within(info, mod->module_core, mod->core_size)) {
65665+ if (within_module_core_rw((unsigned long)info, mod)) {
65666 if (prev)
65667 prev->next = info->next;
65668 else
65669diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
65670index ae34bf5..4e2f3d0 100644
65671--- a/kernel/hrtimer.c
65672+++ b/kernel/hrtimer.c
65673@@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
65674 local_irq_restore(flags);
65675 }
65676
65677-static void run_hrtimer_softirq(struct softirq_action *h)
65678+static void run_hrtimer_softirq(void)
65679 {
65680 hrtimer_peek_ahead_timers();
65681 }
65682diff --git a/kernel/jump_label.c b/kernel/jump_label.c
65683index 4304919..408c4c0 100644
65684--- a/kernel/jump_label.c
65685+++ b/kernel/jump_label.c
65686@@ -13,6 +13,7 @@
65687 #include <linux/sort.h>
65688 #include <linux/err.h>
65689 #include <linux/static_key.h>
65690+#include <linux/mm.h>
65691
65692 #ifdef HAVE_JUMP_LABEL
65693
65694@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
65695
65696 size = (((unsigned long)stop - (unsigned long)start)
65697 / sizeof(struct jump_entry));
65698+ pax_open_kernel();
65699 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
65700+ pax_close_kernel();
65701 }
65702
65703 static void jump_label_update(struct static_key *key, int enable);
65704@@ -356,10 +359,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
65705 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
65706 struct jump_entry *iter;
65707
65708+ pax_open_kernel();
65709 for (iter = iter_start; iter < iter_stop; iter++) {
65710 if (within_module_init(iter->code, mod))
65711 iter->code = 0;
65712 }
65713+ pax_close_kernel();
65714 }
65715
65716 static int
65717diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
65718index 079f1d3..a407562 100644
65719--- a/kernel/kallsyms.c
65720+++ b/kernel/kallsyms.c
65721@@ -11,6 +11,9 @@
65722 * Changed the compression method from stem compression to "table lookup"
65723 * compression (see scripts/kallsyms.c for a more complete description)
65724 */
65725+#ifdef CONFIG_GRKERNSEC_HIDESYM
65726+#define __INCLUDED_BY_HIDESYM 1
65727+#endif
65728 #include <linux/kallsyms.h>
65729 #include <linux/module.h>
65730 #include <linux/init.h>
65731@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
65732
65733 static inline int is_kernel_inittext(unsigned long addr)
65734 {
65735+ if (system_state != SYSTEM_BOOTING)
65736+ return 0;
65737+
65738 if (addr >= (unsigned long)_sinittext
65739 && addr <= (unsigned long)_einittext)
65740 return 1;
65741 return 0;
65742 }
65743
65744+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65745+#ifdef CONFIG_MODULES
65746+static inline int is_module_text(unsigned long addr)
65747+{
65748+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
65749+ return 1;
65750+
65751+ addr = ktla_ktva(addr);
65752+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
65753+}
65754+#else
65755+static inline int is_module_text(unsigned long addr)
65756+{
65757+ return 0;
65758+}
65759+#endif
65760+#endif
65761+
65762 static inline int is_kernel_text(unsigned long addr)
65763 {
65764 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
65765@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
65766
65767 static inline int is_kernel(unsigned long addr)
65768 {
65769+
65770+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65771+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
65772+ return 1;
65773+
65774+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
65775+#else
65776 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
65777+#endif
65778+
65779 return 1;
65780 return in_gate_area_no_mm(addr);
65781 }
65782
65783 static int is_ksym_addr(unsigned long addr)
65784 {
65785+
65786+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65787+ if (is_module_text(addr))
65788+ return 0;
65789+#endif
65790+
65791 if (all_var)
65792 return is_kernel(addr);
65793
65794@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
65795
65796 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
65797 {
65798- iter->name[0] = '\0';
65799 iter->nameoff = get_symbol_offset(new_pos);
65800 iter->pos = new_pos;
65801 }
65802@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
65803 {
65804 struct kallsym_iter *iter = m->private;
65805
65806+#ifdef CONFIG_GRKERNSEC_HIDESYM
65807+ if (current_uid())
65808+ return 0;
65809+#endif
65810+
65811 /* Some debugging symbols have no name. Ignore them. */
65812 if (!iter->name[0])
65813 return 0;
65814@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
65815 struct kallsym_iter *iter;
65816 int ret;
65817
65818- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
65819+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
65820 if (!iter)
65821 return -ENOMEM;
65822 reset_iter(iter, 0);
65823diff --git a/kernel/kexec.c b/kernel/kexec.c
65824index 4e2e472..cd0c7ae 100644
65825--- a/kernel/kexec.c
65826+++ b/kernel/kexec.c
65827@@ -1046,7 +1046,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
65828 unsigned long flags)
65829 {
65830 struct compat_kexec_segment in;
65831- struct kexec_segment out, __user *ksegments;
65832+ struct kexec_segment out;
65833+ struct kexec_segment __user *ksegments;
65834 unsigned long i, result;
65835
65836 /* Don't allow clients that don't understand the native
65837diff --git a/kernel/kmod.c b/kernel/kmod.c
65838index 05698a7..a4c1e3a 100644
65839--- a/kernel/kmod.c
65840+++ b/kernel/kmod.c
65841@@ -66,7 +66,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
65842 kfree(info->argv);
65843 }
65844
65845-static int call_modprobe(char *module_name, int wait)
65846+static int call_modprobe(char *module_name, char *module_param, int wait)
65847 {
65848 static char *envp[] = {
65849 "HOME=/",
65850@@ -75,7 +75,7 @@ static int call_modprobe(char *module_name, int wait)
65851 NULL
65852 };
65853
65854- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
65855+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
65856 if (!argv)
65857 goto out;
65858
65859@@ -87,7 +87,8 @@ static int call_modprobe(char *module_name, int wait)
65860 argv[1] = "-q";
65861 argv[2] = "--";
65862 argv[3] = module_name; /* check free_modprobe_argv() */
65863- argv[4] = NULL;
65864+ argv[4] = module_param;
65865+ argv[5] = NULL;
65866
65867 return call_usermodehelper_fns(modprobe_path, argv, envp,
65868 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
65869@@ -112,9 +113,8 @@ out:
65870 * If module auto-loading support is disabled then this function
65871 * becomes a no-operation.
65872 */
65873-int __request_module(bool wait, const char *fmt, ...)
65874+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
65875 {
65876- va_list args;
65877 char module_name[MODULE_NAME_LEN];
65878 unsigned int max_modprobes;
65879 int ret;
65880@@ -122,9 +122,7 @@ int __request_module(bool wait, const char *fmt, ...)
65881 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
65882 static int kmod_loop_msg;
65883
65884- va_start(args, fmt);
65885- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
65886- va_end(args);
65887+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
65888 if (ret >= MODULE_NAME_LEN)
65889 return -ENAMETOOLONG;
65890
65891@@ -132,6 +130,20 @@ int __request_module(bool wait, const char *fmt, ...)
65892 if (ret)
65893 return ret;
65894
65895+#ifdef CONFIG_GRKERNSEC_MODHARDEN
65896+ if (!current_uid()) {
65897+ /* hack to workaround consolekit/udisks stupidity */
65898+ read_lock(&tasklist_lock);
65899+ if (!strcmp(current->comm, "mount") &&
65900+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
65901+ read_unlock(&tasklist_lock);
65902+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
65903+ return -EPERM;
65904+ }
65905+ read_unlock(&tasklist_lock);
65906+ }
65907+#endif
65908+
65909 /* If modprobe needs a service that is in a module, we get a recursive
65910 * loop. Limit the number of running kmod threads to max_threads/2 or
65911 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
65912@@ -160,11 +172,52 @@ int __request_module(bool wait, const char *fmt, ...)
65913
65914 trace_module_request(module_name, wait, _RET_IP_);
65915
65916- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
65917+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
65918
65919 atomic_dec(&kmod_concurrent);
65920 return ret;
65921 }
65922+
65923+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
65924+{
65925+ va_list args;
65926+ int ret;
65927+
65928+ va_start(args, fmt);
65929+ ret = ____request_module(wait, module_param, fmt, args);
65930+ va_end(args);
65931+
65932+ return ret;
65933+}
65934+
65935+int __request_module(bool wait, const char *fmt, ...)
65936+{
65937+ va_list args;
65938+ int ret;
65939+
65940+#ifdef CONFIG_GRKERNSEC_MODHARDEN
65941+ if (current_uid()) {
65942+ char module_param[MODULE_NAME_LEN];
65943+
65944+ memset(module_param, 0, sizeof(module_param));
65945+
65946+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
65947+
65948+ va_start(args, fmt);
65949+ ret = ____request_module(wait, module_param, fmt, args);
65950+ va_end(args);
65951+
65952+ return ret;
65953+ }
65954+#endif
65955+
65956+ va_start(args, fmt);
65957+ ret = ____request_module(wait, NULL, fmt, args);
65958+ va_end(args);
65959+
65960+ return ret;
65961+}
65962+
65963 EXPORT_SYMBOL(__request_module);
65964 #endif /* CONFIG_MODULES */
65965
65966@@ -267,7 +320,7 @@ static int wait_for_helper(void *data)
65967 *
65968 * Thus the __user pointer cast is valid here.
65969 */
65970- sys_wait4(pid, (int __user *)&ret, 0, NULL);
65971+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
65972
65973 /*
65974 * If ret is 0, either ____call_usermodehelper failed and the
65975diff --git a/kernel/kprobes.c b/kernel/kprobes.c
65976index c62b854..cb67968 100644
65977--- a/kernel/kprobes.c
65978+++ b/kernel/kprobes.c
65979@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
65980 * kernel image and loaded module images reside. This is required
65981 * so x86_64 can correctly handle the %rip-relative fixups.
65982 */
65983- kip->insns = module_alloc(PAGE_SIZE);
65984+ kip->insns = module_alloc_exec(PAGE_SIZE);
65985 if (!kip->insns) {
65986 kfree(kip);
65987 return NULL;
65988@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
65989 */
65990 if (!list_is_singular(&kip->list)) {
65991 list_del(&kip->list);
65992- module_free(NULL, kip->insns);
65993+ module_free_exec(NULL, kip->insns);
65994 kfree(kip);
65995 }
65996 return 1;
65997@@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
65998 {
65999 int i, err = 0;
66000 unsigned long offset = 0, size = 0;
66001- char *modname, namebuf[128];
66002+ char *modname, namebuf[KSYM_NAME_LEN];
66003 const char *symbol_name;
66004 void *addr;
66005 struct kprobe_blackpoint *kb;
66006@@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
66007 const char *sym = NULL;
66008 unsigned int i = *(loff_t *) v;
66009 unsigned long offset = 0;
66010- char *modname, namebuf[128];
66011+ char *modname, namebuf[KSYM_NAME_LEN];
66012
66013 head = &kprobe_table[i];
66014 preempt_disable();
66015diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
66016index 4e316e1..5501eef 100644
66017--- a/kernel/ksysfs.c
66018+++ b/kernel/ksysfs.c
66019@@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
66020 {
66021 if (count+1 > UEVENT_HELPER_PATH_LEN)
66022 return -ENOENT;
66023+ if (!capable(CAP_SYS_ADMIN))
66024+ return -EPERM;
66025 memcpy(uevent_helper, buf, count);
66026 uevent_helper[count] = '\0';
66027 if (count && uevent_helper[count-1] == '\n')
66028diff --git a/kernel/lockdep.c b/kernel/lockdep.c
66029index ea9ee45..67ebc8f 100644
66030--- a/kernel/lockdep.c
66031+++ b/kernel/lockdep.c
66032@@ -590,6 +590,10 @@ static int static_obj(void *obj)
66033 end = (unsigned long) &_end,
66034 addr = (unsigned long) obj;
66035
66036+#ifdef CONFIG_PAX_KERNEXEC
66037+ start = ktla_ktva(start);
66038+#endif
66039+
66040 /*
66041 * static variable?
66042 */
66043@@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
66044 if (!static_obj(lock->key)) {
66045 debug_locks_off();
66046 printk("INFO: trying to register non-static key.\n");
66047+ printk("lock:%pS key:%pS.\n", lock, lock->key);
66048 printk("the code is fine but needs lockdep annotation.\n");
66049 printk("turning off the locking correctness validator.\n");
66050 dump_stack();
66051@@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
66052 if (!class)
66053 return 0;
66054 }
66055- atomic_inc((atomic_t *)&class->ops);
66056+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
66057 if (very_verbose(class)) {
66058 printk("\nacquire class [%p] %s", class->key, class->name);
66059 if (class->name_version > 1)
66060diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
66061index 91c32a0..b2c71c5 100644
66062--- a/kernel/lockdep_proc.c
66063+++ b/kernel/lockdep_proc.c
66064@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
66065
66066 static void print_name(struct seq_file *m, struct lock_class *class)
66067 {
66068- char str[128];
66069+ char str[KSYM_NAME_LEN];
66070 const char *name = class->name;
66071
66072 if (!name) {
66073diff --git a/kernel/module.c b/kernel/module.c
66074index 78ac6ec..e87db0e 100644
66075--- a/kernel/module.c
66076+++ b/kernel/module.c
66077@@ -58,6 +58,7 @@
66078 #include <linux/jump_label.h>
66079 #include <linux/pfn.h>
66080 #include <linux/bsearch.h>
66081+#include <linux/grsecurity.h>
66082
66083 #define CREATE_TRACE_POINTS
66084 #include <trace/events/module.h>
66085@@ -114,7 +115,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
66086
66087 /* Bounds of module allocation, for speeding __module_address.
66088 * Protected by module_mutex. */
66089-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
66090+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
66091+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
66092
66093 int register_module_notifier(struct notifier_block * nb)
66094 {
66095@@ -278,7 +280,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66096 return true;
66097
66098 list_for_each_entry_rcu(mod, &modules, list) {
66099- struct symsearch arr[] = {
66100+ struct symsearch modarr[] = {
66101 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
66102 NOT_GPL_ONLY, false },
66103 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
66104@@ -300,7 +302,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66105 #endif
66106 };
66107
66108- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
66109+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
66110 return true;
66111 }
66112 return false;
66113@@ -432,7 +434,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
66114 static int percpu_modalloc(struct module *mod,
66115 unsigned long size, unsigned long align)
66116 {
66117- if (align > PAGE_SIZE) {
66118+ if (align-1 >= PAGE_SIZE) {
66119 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
66120 mod->name, align, PAGE_SIZE);
66121 align = PAGE_SIZE;
66122@@ -1032,7 +1034,7 @@ struct module_attribute module_uevent =
66123 static ssize_t show_coresize(struct module_attribute *mattr,
66124 struct module_kobject *mk, char *buffer)
66125 {
66126- return sprintf(buffer, "%u\n", mk->mod->core_size);
66127+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
66128 }
66129
66130 static struct module_attribute modinfo_coresize =
66131@@ -1041,7 +1043,7 @@ static struct module_attribute modinfo_coresize =
66132 static ssize_t show_initsize(struct module_attribute *mattr,
66133 struct module_kobject *mk, char *buffer)
66134 {
66135- return sprintf(buffer, "%u\n", mk->mod->init_size);
66136+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
66137 }
66138
66139 static struct module_attribute modinfo_initsize =
66140@@ -1255,7 +1257,7 @@ resolve_symbol_wait(struct module *mod,
66141 */
66142 #ifdef CONFIG_SYSFS
66143
66144-#ifdef CONFIG_KALLSYMS
66145+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66146 static inline bool sect_empty(const Elf_Shdr *sect)
66147 {
66148 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
66149@@ -1721,21 +1723,21 @@ static void set_section_ro_nx(void *base,
66150
66151 static void unset_module_core_ro_nx(struct module *mod)
66152 {
66153- set_page_attributes(mod->module_core + mod->core_text_size,
66154- mod->module_core + mod->core_size,
66155+ set_page_attributes(mod->module_core_rw,
66156+ mod->module_core_rw + mod->core_size_rw,
66157 set_memory_x);
66158- set_page_attributes(mod->module_core,
66159- mod->module_core + mod->core_ro_size,
66160+ set_page_attributes(mod->module_core_rx,
66161+ mod->module_core_rx + mod->core_size_rx,
66162 set_memory_rw);
66163 }
66164
66165 static void unset_module_init_ro_nx(struct module *mod)
66166 {
66167- set_page_attributes(mod->module_init + mod->init_text_size,
66168- mod->module_init + mod->init_size,
66169+ set_page_attributes(mod->module_init_rw,
66170+ mod->module_init_rw + mod->init_size_rw,
66171 set_memory_x);
66172- set_page_attributes(mod->module_init,
66173- mod->module_init + mod->init_ro_size,
66174+ set_page_attributes(mod->module_init_rx,
66175+ mod->module_init_rx + mod->init_size_rx,
66176 set_memory_rw);
66177 }
66178
66179@@ -1746,14 +1748,14 @@ void set_all_modules_text_rw(void)
66180
66181 mutex_lock(&module_mutex);
66182 list_for_each_entry_rcu(mod, &modules, list) {
66183- if ((mod->module_core) && (mod->core_text_size)) {
66184- set_page_attributes(mod->module_core,
66185- mod->module_core + mod->core_text_size,
66186+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
66187+ set_page_attributes(mod->module_core_rx,
66188+ mod->module_core_rx + mod->core_size_rx,
66189 set_memory_rw);
66190 }
66191- if ((mod->module_init) && (mod->init_text_size)) {
66192- set_page_attributes(mod->module_init,
66193- mod->module_init + mod->init_text_size,
66194+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
66195+ set_page_attributes(mod->module_init_rx,
66196+ mod->module_init_rx + mod->init_size_rx,
66197 set_memory_rw);
66198 }
66199 }
66200@@ -1767,14 +1769,14 @@ void set_all_modules_text_ro(void)
66201
66202 mutex_lock(&module_mutex);
66203 list_for_each_entry_rcu(mod, &modules, list) {
66204- if ((mod->module_core) && (mod->core_text_size)) {
66205- set_page_attributes(mod->module_core,
66206- mod->module_core + mod->core_text_size,
66207+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
66208+ set_page_attributes(mod->module_core_rx,
66209+ mod->module_core_rx + mod->core_size_rx,
66210 set_memory_ro);
66211 }
66212- if ((mod->module_init) && (mod->init_text_size)) {
66213- set_page_attributes(mod->module_init,
66214- mod->module_init + mod->init_text_size,
66215+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
66216+ set_page_attributes(mod->module_init_rx,
66217+ mod->module_init_rx + mod->init_size_rx,
66218 set_memory_ro);
66219 }
66220 }
66221@@ -1820,16 +1822,19 @@ static void free_module(struct module *mod)
66222
66223 /* This may be NULL, but that's OK */
66224 unset_module_init_ro_nx(mod);
66225- module_free(mod, mod->module_init);
66226+ module_free(mod, mod->module_init_rw);
66227+ module_free_exec(mod, mod->module_init_rx);
66228 kfree(mod->args);
66229 percpu_modfree(mod);
66230
66231 /* Free lock-classes: */
66232- lockdep_free_key_range(mod->module_core, mod->core_size);
66233+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
66234+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
66235
66236 /* Finally, free the core (containing the module structure) */
66237 unset_module_core_ro_nx(mod);
66238- module_free(mod, mod->module_core);
66239+ module_free_exec(mod, mod->module_core_rx);
66240+ module_free(mod, mod->module_core_rw);
66241
66242 #ifdef CONFIG_MPU
66243 update_protections(current->mm);
66244@@ -1899,9 +1904,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66245 int ret = 0;
66246 const struct kernel_symbol *ksym;
66247
66248+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66249+ int is_fs_load = 0;
66250+ int register_filesystem_found = 0;
66251+ char *p;
66252+
66253+ p = strstr(mod->args, "grsec_modharden_fs");
66254+ if (p) {
66255+ char *endptr = p + strlen("grsec_modharden_fs");
66256+ /* copy \0 as well */
66257+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
66258+ is_fs_load = 1;
66259+ }
66260+#endif
66261+
66262 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
66263 const char *name = info->strtab + sym[i].st_name;
66264
66265+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66266+ /* it's a real shame this will never get ripped and copied
66267+ upstream! ;(
66268+ */
66269+ if (is_fs_load && !strcmp(name, "register_filesystem"))
66270+ register_filesystem_found = 1;
66271+#endif
66272+
66273 switch (sym[i].st_shndx) {
66274 case SHN_COMMON:
66275 /* We compiled with -fno-common. These are not
66276@@ -1922,7 +1949,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66277 ksym = resolve_symbol_wait(mod, info, name);
66278 /* Ok if resolved. */
66279 if (ksym && !IS_ERR(ksym)) {
66280+ pax_open_kernel();
66281 sym[i].st_value = ksym->value;
66282+ pax_close_kernel();
66283 break;
66284 }
66285
66286@@ -1941,11 +1970,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66287 secbase = (unsigned long)mod_percpu(mod);
66288 else
66289 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
66290+ pax_open_kernel();
66291 sym[i].st_value += secbase;
66292+ pax_close_kernel();
66293 break;
66294 }
66295 }
66296
66297+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66298+ if (is_fs_load && !register_filesystem_found) {
66299+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
66300+ ret = -EPERM;
66301+ }
66302+#endif
66303+
66304 return ret;
66305 }
66306
66307@@ -2049,22 +2087,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
66308 || s->sh_entsize != ~0UL
66309 || strstarts(sname, ".init"))
66310 continue;
66311- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
66312+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66313+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
66314+ else
66315+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
66316 pr_debug("\t%s\n", sname);
66317 }
66318- switch (m) {
66319- case 0: /* executable */
66320- mod->core_size = debug_align(mod->core_size);
66321- mod->core_text_size = mod->core_size;
66322- break;
66323- case 1: /* RO: text and ro-data */
66324- mod->core_size = debug_align(mod->core_size);
66325- mod->core_ro_size = mod->core_size;
66326- break;
66327- case 3: /* whole core */
66328- mod->core_size = debug_align(mod->core_size);
66329- break;
66330- }
66331 }
66332
66333 pr_debug("Init section allocation order:\n");
66334@@ -2078,23 +2106,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
66335 || s->sh_entsize != ~0UL
66336 || !strstarts(sname, ".init"))
66337 continue;
66338- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
66339- | INIT_OFFSET_MASK);
66340+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66341+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
66342+ else
66343+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
66344+ s->sh_entsize |= INIT_OFFSET_MASK;
66345 pr_debug("\t%s\n", sname);
66346 }
66347- switch (m) {
66348- case 0: /* executable */
66349- mod->init_size = debug_align(mod->init_size);
66350- mod->init_text_size = mod->init_size;
66351- break;
66352- case 1: /* RO: text and ro-data */
66353- mod->init_size = debug_align(mod->init_size);
66354- mod->init_ro_size = mod->init_size;
66355- break;
66356- case 3: /* whole init */
66357- mod->init_size = debug_align(mod->init_size);
66358- break;
66359- }
66360 }
66361 }
66362
66363@@ -2266,7 +2284,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66364
66365 /* Put symbol section at end of init part of module. */
66366 symsect->sh_flags |= SHF_ALLOC;
66367- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
66368+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
66369 info->index.sym) | INIT_OFFSET_MASK;
66370 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
66371
66372@@ -2281,13 +2299,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66373 }
66374
66375 /* Append room for core symbols at end of core part. */
66376- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
66377- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
66378- mod->core_size += strtab_size;
66379+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
66380+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
66381+ mod->core_size_rx += strtab_size;
66382
66383 /* Put string table section at end of init part of module. */
66384 strsect->sh_flags |= SHF_ALLOC;
66385- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
66386+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
66387 info->index.str) | INIT_OFFSET_MASK;
66388 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
66389 }
66390@@ -2305,12 +2323,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66391 /* Make sure we get permanent strtab: don't use info->strtab. */
66392 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
66393
66394+ pax_open_kernel();
66395+
66396 /* Set types up while we still have access to sections. */
66397 for (i = 0; i < mod->num_symtab; i++)
66398 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
66399
66400- mod->core_symtab = dst = mod->module_core + info->symoffs;
66401- mod->core_strtab = s = mod->module_core + info->stroffs;
66402+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
66403+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
66404 src = mod->symtab;
66405 *dst = *src;
66406 *s++ = 0;
66407@@ -2323,6 +2343,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66408 s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
66409 }
66410 mod->core_num_syms = ndst;
66411+
66412+ pax_close_kernel();
66413 }
66414 #else
66415 static inline void layout_symtab(struct module *mod, struct load_info *info)
66416@@ -2356,17 +2378,33 @@ void * __weak module_alloc(unsigned long size)
66417 return size == 0 ? NULL : vmalloc_exec(size);
66418 }
66419
66420-static void *module_alloc_update_bounds(unsigned long size)
66421+static void *module_alloc_update_bounds_rw(unsigned long size)
66422 {
66423 void *ret = module_alloc(size);
66424
66425 if (ret) {
66426 mutex_lock(&module_mutex);
66427 /* Update module bounds. */
66428- if ((unsigned long)ret < module_addr_min)
66429- module_addr_min = (unsigned long)ret;
66430- if ((unsigned long)ret + size > module_addr_max)
66431- module_addr_max = (unsigned long)ret + size;
66432+ if ((unsigned long)ret < module_addr_min_rw)
66433+ module_addr_min_rw = (unsigned long)ret;
66434+ if ((unsigned long)ret + size > module_addr_max_rw)
66435+ module_addr_max_rw = (unsigned long)ret + size;
66436+ mutex_unlock(&module_mutex);
66437+ }
66438+ return ret;
66439+}
66440+
66441+static void *module_alloc_update_bounds_rx(unsigned long size)
66442+{
66443+ void *ret = module_alloc_exec(size);
66444+
66445+ if (ret) {
66446+ mutex_lock(&module_mutex);
66447+ /* Update module bounds. */
66448+ if ((unsigned long)ret < module_addr_min_rx)
66449+ module_addr_min_rx = (unsigned long)ret;
66450+ if ((unsigned long)ret + size > module_addr_max_rx)
66451+ module_addr_max_rx = (unsigned long)ret + size;
66452 mutex_unlock(&module_mutex);
66453 }
66454 return ret;
66455@@ -2543,8 +2581,14 @@ static struct module *setup_load_info(struct load_info *info)
66456 static int check_modinfo(struct module *mod, struct load_info *info)
66457 {
66458 const char *modmagic = get_modinfo(info, "vermagic");
66459+ const char *license = get_modinfo(info, "license");
66460 int err;
66461
66462+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
66463+ if (!license || !license_is_gpl_compatible(license))
66464+ return -ENOEXEC;
66465+#endif
66466+
66467 /* This is allowed: modprobe --force will invalidate it. */
66468 if (!modmagic) {
66469 err = try_to_force_load(mod, "bad vermagic");
66470@@ -2567,7 +2611,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
66471 }
66472
66473 /* Set up license info based on the info section */
66474- set_license(mod, get_modinfo(info, "license"));
66475+ set_license(mod, license);
66476
66477 return 0;
66478 }
66479@@ -2661,7 +2705,7 @@ static int move_module(struct module *mod, struct load_info *info)
66480 void *ptr;
66481
66482 /* Do the allocs. */
66483- ptr = module_alloc_update_bounds(mod->core_size);
66484+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
66485 /*
66486 * The pointer to this block is stored in the module structure
66487 * which is inside the block. Just mark it as not being a
66488@@ -2671,23 +2715,50 @@ static int move_module(struct module *mod, struct load_info *info)
66489 if (!ptr)
66490 return -ENOMEM;
66491
66492- memset(ptr, 0, mod->core_size);
66493- mod->module_core = ptr;
66494+ memset(ptr, 0, mod->core_size_rw);
66495+ mod->module_core_rw = ptr;
66496
66497- ptr = module_alloc_update_bounds(mod->init_size);
66498+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
66499 /*
66500 * The pointer to this block is stored in the module structure
66501 * which is inside the block. This block doesn't need to be
66502 * scanned as it contains data and code that will be freed
66503 * after the module is initialized.
66504 */
66505- kmemleak_ignore(ptr);
66506- if (!ptr && mod->init_size) {
66507- module_free(mod, mod->module_core);
66508+ kmemleak_not_leak(ptr);
66509+ if (!ptr && mod->init_size_rw) {
66510+ module_free(mod, mod->module_core_rw);
66511 return -ENOMEM;
66512 }
66513- memset(ptr, 0, mod->init_size);
66514- mod->module_init = ptr;
66515+ memset(ptr, 0, mod->init_size_rw);
66516+ mod->module_init_rw = ptr;
66517+
66518+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
66519+ kmemleak_not_leak(ptr);
66520+ if (!ptr) {
66521+ module_free(mod, mod->module_init_rw);
66522+ module_free(mod, mod->module_core_rw);
66523+ return -ENOMEM;
66524+ }
66525+
66526+ pax_open_kernel();
66527+ memset(ptr, 0, mod->core_size_rx);
66528+ pax_close_kernel();
66529+ mod->module_core_rx = ptr;
66530+
66531+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
66532+ kmemleak_not_leak(ptr);
66533+ if (!ptr && mod->init_size_rx) {
66534+ module_free_exec(mod, mod->module_core_rx);
66535+ module_free(mod, mod->module_init_rw);
66536+ module_free(mod, mod->module_core_rw);
66537+ return -ENOMEM;
66538+ }
66539+
66540+ pax_open_kernel();
66541+ memset(ptr, 0, mod->init_size_rx);
66542+ pax_close_kernel();
66543+ mod->module_init_rx = ptr;
66544
66545 /* Transfer each section which specifies SHF_ALLOC */
66546 pr_debug("final section addresses:\n");
66547@@ -2698,16 +2769,45 @@ static int move_module(struct module *mod, struct load_info *info)
66548 if (!(shdr->sh_flags & SHF_ALLOC))
66549 continue;
66550
66551- if (shdr->sh_entsize & INIT_OFFSET_MASK)
66552- dest = mod->module_init
66553- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66554- else
66555- dest = mod->module_core + shdr->sh_entsize;
66556+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
66557+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66558+ dest = mod->module_init_rw
66559+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66560+ else
66561+ dest = mod->module_init_rx
66562+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66563+ } else {
66564+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66565+ dest = mod->module_core_rw + shdr->sh_entsize;
66566+ else
66567+ dest = mod->module_core_rx + shdr->sh_entsize;
66568+ }
66569+
66570+ if (shdr->sh_type != SHT_NOBITS) {
66571+
66572+#ifdef CONFIG_PAX_KERNEXEC
66573+#ifdef CONFIG_X86_64
66574+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
66575+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
66576+#endif
66577+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
66578+ pax_open_kernel();
66579+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66580+ pax_close_kernel();
66581+ } else
66582+#endif
66583
66584- if (shdr->sh_type != SHT_NOBITS)
66585 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66586+ }
66587 /* Update sh_addr to point to copy in image. */
66588- shdr->sh_addr = (unsigned long)dest;
66589+
66590+#ifdef CONFIG_PAX_KERNEXEC
66591+ if (shdr->sh_flags & SHF_EXECINSTR)
66592+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
66593+ else
66594+#endif
66595+
66596+ shdr->sh_addr = (unsigned long)dest;
66597 pr_debug("\t0x%lx %s\n",
66598 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
66599 }
66600@@ -2758,12 +2858,12 @@ static void flush_module_icache(const struct module *mod)
66601 * Do it before processing of module parameters, so the module
66602 * can provide parameter accessor functions of its own.
66603 */
66604- if (mod->module_init)
66605- flush_icache_range((unsigned long)mod->module_init,
66606- (unsigned long)mod->module_init
66607- + mod->init_size);
66608- flush_icache_range((unsigned long)mod->module_core,
66609- (unsigned long)mod->module_core + mod->core_size);
66610+ if (mod->module_init_rx)
66611+ flush_icache_range((unsigned long)mod->module_init_rx,
66612+ (unsigned long)mod->module_init_rx
66613+ + mod->init_size_rx);
66614+ flush_icache_range((unsigned long)mod->module_core_rx,
66615+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
66616
66617 set_fs(old_fs);
66618 }
66619@@ -2833,8 +2933,10 @@ out:
66620 static void module_deallocate(struct module *mod, struct load_info *info)
66621 {
66622 percpu_modfree(mod);
66623- module_free(mod, mod->module_init);
66624- module_free(mod, mod->module_core);
66625+ module_free_exec(mod, mod->module_init_rx);
66626+ module_free_exec(mod, mod->module_core_rx);
66627+ module_free(mod, mod->module_init_rw);
66628+ module_free(mod, mod->module_core_rw);
66629 }
66630
66631 int __weak module_finalize(const Elf_Ehdr *hdr,
66632@@ -2898,9 +3000,38 @@ static struct module *load_module(void __user *umod,
66633 if (err)
66634 goto free_unload;
66635
66636+ /* Now copy in args */
66637+ mod->args = strndup_user(uargs, ~0UL >> 1);
66638+ if (IS_ERR(mod->args)) {
66639+ err = PTR_ERR(mod->args);
66640+ goto free_unload;
66641+ }
66642+
66643 /* Set up MODINFO_ATTR fields */
66644 setup_modinfo(mod, &info);
66645
66646+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66647+ {
66648+ char *p, *p2;
66649+
66650+ if (strstr(mod->args, "grsec_modharden_netdev")) {
66651+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
66652+ err = -EPERM;
66653+ goto free_modinfo;
66654+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
66655+ p += strlen("grsec_modharden_normal");
66656+ p2 = strstr(p, "_");
66657+ if (p2) {
66658+ *p2 = '\0';
66659+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
66660+ *p2 = '_';
66661+ }
66662+ err = -EPERM;
66663+ goto free_modinfo;
66664+ }
66665+ }
66666+#endif
66667+
66668 /* Fix up syms, so that st_value is a pointer to location. */
66669 err = simplify_symbols(mod, &info);
66670 if (err < 0)
66671@@ -2916,13 +3047,6 @@ static struct module *load_module(void __user *umod,
66672
66673 flush_module_icache(mod);
66674
66675- /* Now copy in args */
66676- mod->args = strndup_user(uargs, ~0UL >> 1);
66677- if (IS_ERR(mod->args)) {
66678- err = PTR_ERR(mod->args);
66679- goto free_arch_cleanup;
66680- }
66681-
66682 /* Mark state as coming so strong_try_module_get() ignores us. */
66683 mod->state = MODULE_STATE_COMING;
66684
66685@@ -2980,11 +3104,10 @@ static struct module *load_module(void __user *umod,
66686 unlock:
66687 mutex_unlock(&module_mutex);
66688 synchronize_sched();
66689- kfree(mod->args);
66690- free_arch_cleanup:
66691 module_arch_cleanup(mod);
66692 free_modinfo:
66693 free_modinfo(mod);
66694+ kfree(mod->args);
66695 free_unload:
66696 module_unload_free(mod);
66697 free_module:
66698@@ -3025,16 +3148,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
66699 MODULE_STATE_COMING, mod);
66700
66701 /* Set RO and NX regions for core */
66702- set_section_ro_nx(mod->module_core,
66703- mod->core_text_size,
66704- mod->core_ro_size,
66705- mod->core_size);
66706+ set_section_ro_nx(mod->module_core_rx,
66707+ mod->core_size_rx,
66708+ mod->core_size_rx,
66709+ mod->core_size_rx);
66710
66711 /* Set RO and NX regions for init */
66712- set_section_ro_nx(mod->module_init,
66713- mod->init_text_size,
66714- mod->init_ro_size,
66715- mod->init_size);
66716+ set_section_ro_nx(mod->module_init_rx,
66717+ mod->init_size_rx,
66718+ mod->init_size_rx,
66719+ mod->init_size_rx);
66720
66721 do_mod_ctors(mod);
66722 /* Start the module */
66723@@ -3080,11 +3203,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
66724 mod->strtab = mod->core_strtab;
66725 #endif
66726 unset_module_init_ro_nx(mod);
66727- module_free(mod, mod->module_init);
66728- mod->module_init = NULL;
66729- mod->init_size = 0;
66730- mod->init_ro_size = 0;
66731- mod->init_text_size = 0;
66732+ module_free(mod, mod->module_init_rw);
66733+ module_free_exec(mod, mod->module_init_rx);
66734+ mod->module_init_rw = NULL;
66735+ mod->module_init_rx = NULL;
66736+ mod->init_size_rw = 0;
66737+ mod->init_size_rx = 0;
66738 mutex_unlock(&module_mutex);
66739
66740 return 0;
66741@@ -3115,10 +3239,16 @@ static const char *get_ksymbol(struct module *mod,
66742 unsigned long nextval;
66743
66744 /* At worse, next value is at end of module */
66745- if (within_module_init(addr, mod))
66746- nextval = (unsigned long)mod->module_init+mod->init_text_size;
66747+ if (within_module_init_rx(addr, mod))
66748+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
66749+ else if (within_module_init_rw(addr, mod))
66750+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
66751+ else if (within_module_core_rx(addr, mod))
66752+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
66753+ else if (within_module_core_rw(addr, mod))
66754+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
66755 else
66756- nextval = (unsigned long)mod->module_core+mod->core_text_size;
66757+ return NULL;
66758
66759 /* Scan for closest preceding symbol, and next symbol. (ELF
66760 starts real symbols at 1). */
66761@@ -3353,7 +3483,7 @@ static int m_show(struct seq_file *m, void *p)
66762 char buf[8];
66763
66764 seq_printf(m, "%s %u",
66765- mod->name, mod->init_size + mod->core_size);
66766+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
66767 print_unload_info(m, mod);
66768
66769 /* Informative for users. */
66770@@ -3362,7 +3492,7 @@ static int m_show(struct seq_file *m, void *p)
66771 mod->state == MODULE_STATE_COMING ? "Loading":
66772 "Live");
66773 /* Used by oprofile and other similar tools. */
66774- seq_printf(m, " 0x%pK", mod->module_core);
66775+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
66776
66777 /* Taints info */
66778 if (mod->taints)
66779@@ -3398,7 +3528,17 @@ static const struct file_operations proc_modules_operations = {
66780
66781 static int __init proc_modules_init(void)
66782 {
66783+#ifndef CONFIG_GRKERNSEC_HIDESYM
66784+#ifdef CONFIG_GRKERNSEC_PROC_USER
66785+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
66786+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66787+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
66788+#else
66789 proc_create("modules", 0, NULL, &proc_modules_operations);
66790+#endif
66791+#else
66792+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
66793+#endif
66794 return 0;
66795 }
66796 module_init(proc_modules_init);
66797@@ -3457,12 +3597,12 @@ struct module *__module_address(unsigned long addr)
66798 {
66799 struct module *mod;
66800
66801- if (addr < module_addr_min || addr > module_addr_max)
66802+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
66803+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
66804 return NULL;
66805
66806 list_for_each_entry_rcu(mod, &modules, list)
66807- if (within_module_core(addr, mod)
66808- || within_module_init(addr, mod))
66809+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
66810 return mod;
66811 return NULL;
66812 }
66813@@ -3496,11 +3636,20 @@ bool is_module_text_address(unsigned long addr)
66814 */
66815 struct module *__module_text_address(unsigned long addr)
66816 {
66817- struct module *mod = __module_address(addr);
66818+ struct module *mod;
66819+
66820+#ifdef CONFIG_X86_32
66821+ addr = ktla_ktva(addr);
66822+#endif
66823+
66824+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
66825+ return NULL;
66826+
66827+ mod = __module_address(addr);
66828+
66829 if (mod) {
66830 /* Make sure it's within the text section. */
66831- if (!within(addr, mod->module_init, mod->init_text_size)
66832- && !within(addr, mod->module_core, mod->core_text_size))
66833+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
66834 mod = NULL;
66835 }
66836 return mod;
66837diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
66838index 7e3443f..b2a1e6b 100644
66839--- a/kernel/mutex-debug.c
66840+++ b/kernel/mutex-debug.c
66841@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
66842 }
66843
66844 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66845- struct thread_info *ti)
66846+ struct task_struct *task)
66847 {
66848 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
66849
66850 /* Mark the current thread as blocked on the lock: */
66851- ti->task->blocked_on = waiter;
66852+ task->blocked_on = waiter;
66853 }
66854
66855 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66856- struct thread_info *ti)
66857+ struct task_struct *task)
66858 {
66859 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
66860- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
66861- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
66862- ti->task->blocked_on = NULL;
66863+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
66864+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
66865+ task->blocked_on = NULL;
66866
66867 list_del_init(&waiter->list);
66868 waiter->task = NULL;
66869diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
66870index 0799fd3..d06ae3b 100644
66871--- a/kernel/mutex-debug.h
66872+++ b/kernel/mutex-debug.h
66873@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
66874 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
66875 extern void debug_mutex_add_waiter(struct mutex *lock,
66876 struct mutex_waiter *waiter,
66877- struct thread_info *ti);
66878+ struct task_struct *task);
66879 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66880- struct thread_info *ti);
66881+ struct task_struct *task);
66882 extern void debug_mutex_unlock(struct mutex *lock);
66883 extern void debug_mutex_init(struct mutex *lock, const char *name,
66884 struct lock_class_key *key);
66885diff --git a/kernel/mutex.c b/kernel/mutex.c
66886index a307cc9..27fd2e9 100644
66887--- a/kernel/mutex.c
66888+++ b/kernel/mutex.c
66889@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66890 spin_lock_mutex(&lock->wait_lock, flags);
66891
66892 debug_mutex_lock_common(lock, &waiter);
66893- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
66894+ debug_mutex_add_waiter(lock, &waiter, task);
66895
66896 /* add waiting tasks to the end of the waitqueue (FIFO): */
66897 list_add_tail(&waiter.list, &lock->wait_list);
66898@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66899 * TASK_UNINTERRUPTIBLE case.)
66900 */
66901 if (unlikely(signal_pending_state(state, task))) {
66902- mutex_remove_waiter(lock, &waiter,
66903- task_thread_info(task));
66904+ mutex_remove_waiter(lock, &waiter, task);
66905 mutex_release(&lock->dep_map, 1, ip);
66906 spin_unlock_mutex(&lock->wait_lock, flags);
66907
66908@@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66909 done:
66910 lock_acquired(&lock->dep_map, ip);
66911 /* got the lock - rejoice! */
66912- mutex_remove_waiter(lock, &waiter, current_thread_info());
66913+ mutex_remove_waiter(lock, &waiter, task);
66914 mutex_set_owner(lock);
66915
66916 /* set it to 0 if there are no waiters left: */
66917diff --git a/kernel/panic.c b/kernel/panic.c
66918index 8ed89a1..e83856a 100644
66919--- a/kernel/panic.c
66920+++ b/kernel/panic.c
66921@@ -402,7 +402,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
66922 const char *board;
66923
66924 printk(KERN_WARNING "------------[ cut here ]------------\n");
66925- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
66926+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
66927 board = dmi_get_system_info(DMI_PRODUCT_NAME);
66928 if (board)
66929 printk(KERN_WARNING "Hardware name: %s\n", board);
66930@@ -457,7 +457,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
66931 */
66932 void __stack_chk_fail(void)
66933 {
66934- panic("stack-protector: Kernel stack is corrupted in: %p\n",
66935+ dump_stack();
66936+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
66937 __builtin_return_address(0));
66938 }
66939 EXPORT_SYMBOL(__stack_chk_fail);
66940diff --git a/kernel/pid.c b/kernel/pid.c
66941index 9f08dfa..6765c40 100644
66942--- a/kernel/pid.c
66943+++ b/kernel/pid.c
66944@@ -33,6 +33,7 @@
66945 #include <linux/rculist.h>
66946 #include <linux/bootmem.h>
66947 #include <linux/hash.h>
66948+#include <linux/security.h>
66949 #include <linux/pid_namespace.h>
66950 #include <linux/init_task.h>
66951 #include <linux/syscalls.h>
66952@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
66953
66954 int pid_max = PID_MAX_DEFAULT;
66955
66956-#define RESERVED_PIDS 300
66957+#define RESERVED_PIDS 500
66958
66959 int pid_max_min = RESERVED_PIDS + 1;
66960 int pid_max_max = PID_MAX_LIMIT;
66961@@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
66962 */
66963 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
66964 {
66965+ struct task_struct *task;
66966+
66967 rcu_lockdep_assert(rcu_read_lock_held(),
66968 "find_task_by_pid_ns() needs rcu_read_lock()"
66969 " protection");
66970- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
66971+
66972+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
66973+
66974+ if (gr_pid_is_chrooted(task))
66975+ return NULL;
66976+
66977+ return task;
66978 }
66979
66980 struct task_struct *find_task_by_vpid(pid_t vnr)
66981@@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
66982 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
66983 }
66984
66985+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
66986+{
66987+ rcu_lockdep_assert(rcu_read_lock_held(),
66988+ "find_task_by_pid_ns() needs rcu_read_lock()"
66989+ " protection");
66990+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
66991+}
66992+
66993 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
66994 {
66995 struct pid *pid;
66996diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
66997index 125cb67..a4d1c30 100644
66998--- a/kernel/posix-cpu-timers.c
66999+++ b/kernel/posix-cpu-timers.c
67000@@ -6,6 +6,7 @@
67001 #include <linux/posix-timers.h>
67002 #include <linux/errno.h>
67003 #include <linux/math64.h>
67004+#include <linux/security.h>
67005 #include <asm/uaccess.h>
67006 #include <linux/kernel_stat.h>
67007 #include <trace/events/timer.h>
67008@@ -1578,14 +1579,14 @@ struct k_clock clock_posix_cpu = {
67009
67010 static __init int init_posix_cpu_timers(void)
67011 {
67012- struct k_clock process = {
67013+ static struct k_clock process = {
67014 .clock_getres = process_cpu_clock_getres,
67015 .clock_get = process_cpu_clock_get,
67016 .timer_create = process_cpu_timer_create,
67017 .nsleep = process_cpu_nsleep,
67018 .nsleep_restart = process_cpu_nsleep_restart,
67019 };
67020- struct k_clock thread = {
67021+ static struct k_clock thread = {
67022 .clock_getres = thread_cpu_clock_getres,
67023 .clock_get = thread_cpu_clock_get,
67024 .timer_create = thread_cpu_timer_create,
67025diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
67026index 69185ae..cc2847a 100644
67027--- a/kernel/posix-timers.c
67028+++ b/kernel/posix-timers.c
67029@@ -43,6 +43,7 @@
67030 #include <linux/idr.h>
67031 #include <linux/posix-clock.h>
67032 #include <linux/posix-timers.h>
67033+#include <linux/grsecurity.h>
67034 #include <linux/syscalls.h>
67035 #include <linux/wait.h>
67036 #include <linux/workqueue.h>
67037@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
67038 * which we beg off on and pass to do_sys_settimeofday().
67039 */
67040
67041-static struct k_clock posix_clocks[MAX_CLOCKS];
67042+static struct k_clock *posix_clocks[MAX_CLOCKS];
67043
67044 /*
67045 * These ones are defined below.
67046@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
67047 */
67048 static __init int init_posix_timers(void)
67049 {
67050- struct k_clock clock_realtime = {
67051+ static struct k_clock clock_realtime = {
67052 .clock_getres = hrtimer_get_res,
67053 .clock_get = posix_clock_realtime_get,
67054 .clock_set = posix_clock_realtime_set,
67055@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
67056 .timer_get = common_timer_get,
67057 .timer_del = common_timer_del,
67058 };
67059- struct k_clock clock_monotonic = {
67060+ static struct k_clock clock_monotonic = {
67061 .clock_getres = hrtimer_get_res,
67062 .clock_get = posix_ktime_get_ts,
67063 .nsleep = common_nsleep,
67064@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
67065 .timer_get = common_timer_get,
67066 .timer_del = common_timer_del,
67067 };
67068- struct k_clock clock_monotonic_raw = {
67069+ static struct k_clock clock_monotonic_raw = {
67070 .clock_getres = hrtimer_get_res,
67071 .clock_get = posix_get_monotonic_raw,
67072 };
67073- struct k_clock clock_realtime_coarse = {
67074+ static struct k_clock clock_realtime_coarse = {
67075 .clock_getres = posix_get_coarse_res,
67076 .clock_get = posix_get_realtime_coarse,
67077 };
67078- struct k_clock clock_monotonic_coarse = {
67079+ static struct k_clock clock_monotonic_coarse = {
67080 .clock_getres = posix_get_coarse_res,
67081 .clock_get = posix_get_monotonic_coarse,
67082 };
67083- struct k_clock clock_boottime = {
67084+ static struct k_clock clock_boottime = {
67085 .clock_getres = hrtimer_get_res,
67086 .clock_get = posix_get_boottime,
67087 .nsleep = common_nsleep,
67088@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
67089 return;
67090 }
67091
67092- posix_clocks[clock_id] = *new_clock;
67093+ posix_clocks[clock_id] = new_clock;
67094 }
67095 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
67096
67097@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
67098 return (id & CLOCKFD_MASK) == CLOCKFD ?
67099 &clock_posix_dynamic : &clock_posix_cpu;
67100
67101- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
67102+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
67103 return NULL;
67104- return &posix_clocks[id];
67105+ return posix_clocks[id];
67106 }
67107
67108 static int common_timer_create(struct k_itimer *new_timer)
67109@@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
67110 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
67111 return -EFAULT;
67112
67113+ /* only the CLOCK_REALTIME clock can be set, all other clocks
67114+ have their clock_set fptr set to a nosettime dummy function
67115+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
67116+ call common_clock_set, which calls do_sys_settimeofday, which
67117+ we hook
67118+ */
67119+
67120 return kc->clock_set(which_clock, &new_tp);
67121 }
67122
67123diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
67124index d523593..68197a4 100644
67125--- a/kernel/power/poweroff.c
67126+++ b/kernel/power/poweroff.c
67127@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
67128 .enable_mask = SYSRQ_ENABLE_BOOT,
67129 };
67130
67131-static int pm_sysrq_init(void)
67132+static int __init pm_sysrq_init(void)
67133 {
67134 register_sysrq_key('o', &sysrq_poweroff_op);
67135 return 0;
67136diff --git a/kernel/power/process.c b/kernel/power/process.c
67137index 19db29f..33b52b6 100644
67138--- a/kernel/power/process.c
67139+++ b/kernel/power/process.c
67140@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
67141 u64 elapsed_csecs64;
67142 unsigned int elapsed_csecs;
67143 bool wakeup = false;
67144+ bool timedout = false;
67145
67146 do_gettimeofday(&start);
67147
67148@@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user_only)
67149
67150 while (true) {
67151 todo = 0;
67152+ if (time_after(jiffies, end_time))
67153+ timedout = true;
67154 read_lock(&tasklist_lock);
67155 do_each_thread(g, p) {
67156 if (p == current || !freeze_task(p))
67157@@ -58,9 +61,13 @@ static int try_to_freeze_tasks(bool user_only)
67158 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
67159 * transition can't race with task state testing here.
67160 */
67161- if (!task_is_stopped_or_traced(p) &&
67162- !freezer_should_skip(p))
67163+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
67164 todo++;
67165+ if (timedout) {
67166+ printk(KERN_ERR "Task refusing to freeze:\n");
67167+ sched_show_task(p);
67168+ }
67169+ }
67170 } while_each_thread(g, p);
67171 read_unlock(&tasklist_lock);
67172
67173@@ -69,7 +76,7 @@ static int try_to_freeze_tasks(bool user_only)
67174 todo += wq_busy;
67175 }
67176
67177- if (!todo || time_after(jiffies, end_time))
67178+ if (!todo || timedout)
67179 break;
67180
67181 if (pm_wakeup_pending()) {
67182diff --git a/kernel/printk.c b/kernel/printk.c
67183index b663c2c..1d6ba7a 100644
67184--- a/kernel/printk.c
67185+++ b/kernel/printk.c
67186@@ -316,6 +316,11 @@ static int check_syslog_permissions(int type, bool from_file)
67187 if (from_file && type != SYSLOG_ACTION_OPEN)
67188 return 0;
67189
67190+#ifdef CONFIG_GRKERNSEC_DMESG
67191+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
67192+ return -EPERM;
67193+#endif
67194+
67195 if (syslog_action_restricted(type)) {
67196 if (capable(CAP_SYSLOG))
67197 return 0;
67198diff --git a/kernel/profile.c b/kernel/profile.c
67199index 76b8e77..a2930e8 100644
67200--- a/kernel/profile.c
67201+++ b/kernel/profile.c
67202@@ -39,7 +39,7 @@ struct profile_hit {
67203 /* Oprofile timer tick hook */
67204 static int (*timer_hook)(struct pt_regs *) __read_mostly;
67205
67206-static atomic_t *prof_buffer;
67207+static atomic_unchecked_t *prof_buffer;
67208 static unsigned long prof_len, prof_shift;
67209
67210 int prof_on __read_mostly;
67211@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
67212 hits[i].pc = 0;
67213 continue;
67214 }
67215- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67216+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67217 hits[i].hits = hits[i].pc = 0;
67218 }
67219 }
67220@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67221 * Add the current hit(s) and flush the write-queue out
67222 * to the global buffer:
67223 */
67224- atomic_add(nr_hits, &prof_buffer[pc]);
67225+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
67226 for (i = 0; i < NR_PROFILE_HIT; ++i) {
67227- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67228+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67229 hits[i].pc = hits[i].hits = 0;
67230 }
67231 out:
67232@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67233 {
67234 unsigned long pc;
67235 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
67236- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67237+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67238 }
67239 #endif /* !CONFIG_SMP */
67240
67241@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
67242 return -EFAULT;
67243 buf++; p++; count--; read++;
67244 }
67245- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
67246+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
67247 if (copy_to_user(buf, (void *)pnt, count))
67248 return -EFAULT;
67249 read += count;
67250@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
67251 }
67252 #endif
67253 profile_discard_flip_buffers();
67254- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
67255+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
67256 return count;
67257 }
67258
67259diff --git a/kernel/ptrace.c b/kernel/ptrace.c
67260index ee8d49b..bd3d790 100644
67261--- a/kernel/ptrace.c
67262+++ b/kernel/ptrace.c
67263@@ -280,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
67264
67265 if (seize)
67266 flags |= PT_SEIZED;
67267- if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
67268+ if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
67269 flags |= PT_PTRACE_CAP;
67270 task->ptrace = flags;
67271
67272@@ -487,7 +487,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
67273 break;
67274 return -EIO;
67275 }
67276- if (copy_to_user(dst, buf, retval))
67277+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
67278 return -EFAULT;
67279 copied += retval;
67280 src += retval;
67281@@ -672,7 +672,7 @@ int ptrace_request(struct task_struct *child, long request,
67282 bool seized = child->ptrace & PT_SEIZED;
67283 int ret = -EIO;
67284 siginfo_t siginfo, *si;
67285- void __user *datavp = (void __user *) data;
67286+ void __user *datavp = (__force void __user *) data;
67287 unsigned long __user *datalp = datavp;
67288 unsigned long flags;
67289
67290@@ -874,14 +874,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
67291 goto out;
67292 }
67293
67294+ if (gr_handle_ptrace(child, request)) {
67295+ ret = -EPERM;
67296+ goto out_put_task_struct;
67297+ }
67298+
67299 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67300 ret = ptrace_attach(child, request, addr, data);
67301 /*
67302 * Some architectures need to do book-keeping after
67303 * a ptrace attach.
67304 */
67305- if (!ret)
67306+ if (!ret) {
67307 arch_ptrace_attach(child);
67308+ gr_audit_ptrace(child);
67309+ }
67310 goto out_put_task_struct;
67311 }
67312
67313@@ -907,7 +914,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
67314 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
67315 if (copied != sizeof(tmp))
67316 return -EIO;
67317- return put_user(tmp, (unsigned long __user *)data);
67318+ return put_user(tmp, (__force unsigned long __user *)data);
67319 }
67320
67321 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
67322@@ -1017,14 +1024,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
67323 goto out;
67324 }
67325
67326+ if (gr_handle_ptrace(child, request)) {
67327+ ret = -EPERM;
67328+ goto out_put_task_struct;
67329+ }
67330+
67331 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67332 ret = ptrace_attach(child, request, addr, data);
67333 /*
67334 * Some architectures need to do book-keeping after
67335 * a ptrace attach.
67336 */
67337- if (!ret)
67338+ if (!ret) {
67339 arch_ptrace_attach(child);
67340+ gr_audit_ptrace(child);
67341+ }
67342 goto out_put_task_struct;
67343 }
67344
67345diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
67346index 37a5444..eec170a 100644
67347--- a/kernel/rcutiny.c
67348+++ b/kernel/rcutiny.c
67349@@ -46,7 +46,7 @@
67350 struct rcu_ctrlblk;
67351 static void invoke_rcu_callbacks(void);
67352 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
67353-static void rcu_process_callbacks(struct softirq_action *unused);
67354+static void rcu_process_callbacks(void);
67355 static void __call_rcu(struct rcu_head *head,
67356 void (*func)(struct rcu_head *rcu),
67357 struct rcu_ctrlblk *rcp);
67358@@ -307,7 +307,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
67359 rcu_is_callbacks_kthread()));
67360 }
67361
67362-static void rcu_process_callbacks(struct softirq_action *unused)
67363+static void rcu_process_callbacks(void)
67364 {
67365 __rcu_process_callbacks(&rcu_sched_ctrlblk);
67366 __rcu_process_callbacks(&rcu_bh_ctrlblk);
67367diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
67368index 22ecea0..3789898 100644
67369--- a/kernel/rcutiny_plugin.h
67370+++ b/kernel/rcutiny_plugin.h
67371@@ -955,7 +955,7 @@ static int rcu_kthread(void *arg)
67372 have_rcu_kthread_work = morework;
67373 local_irq_restore(flags);
67374 if (work)
67375- rcu_process_callbacks(NULL);
67376+ rcu_process_callbacks();
67377 schedule_timeout_interruptible(1); /* Leave CPU for others. */
67378 }
67379
67380diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
67381index a89b381..efdcad8 100644
67382--- a/kernel/rcutorture.c
67383+++ b/kernel/rcutorture.c
67384@@ -158,12 +158,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
67385 { 0 };
67386 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
67387 { 0 };
67388-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67389-static atomic_t n_rcu_torture_alloc;
67390-static atomic_t n_rcu_torture_alloc_fail;
67391-static atomic_t n_rcu_torture_free;
67392-static atomic_t n_rcu_torture_mberror;
67393-static atomic_t n_rcu_torture_error;
67394+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67395+static atomic_unchecked_t n_rcu_torture_alloc;
67396+static atomic_unchecked_t n_rcu_torture_alloc_fail;
67397+static atomic_unchecked_t n_rcu_torture_free;
67398+static atomic_unchecked_t n_rcu_torture_mberror;
67399+static atomic_unchecked_t n_rcu_torture_error;
67400 static long n_rcu_torture_boost_ktrerror;
67401 static long n_rcu_torture_boost_rterror;
67402 static long n_rcu_torture_boost_failure;
67403@@ -253,11 +253,11 @@ rcu_torture_alloc(void)
67404
67405 spin_lock_bh(&rcu_torture_lock);
67406 if (list_empty(&rcu_torture_freelist)) {
67407- atomic_inc(&n_rcu_torture_alloc_fail);
67408+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
67409 spin_unlock_bh(&rcu_torture_lock);
67410 return NULL;
67411 }
67412- atomic_inc(&n_rcu_torture_alloc);
67413+ atomic_inc_unchecked(&n_rcu_torture_alloc);
67414 p = rcu_torture_freelist.next;
67415 list_del_init(p);
67416 spin_unlock_bh(&rcu_torture_lock);
67417@@ -270,7 +270,7 @@ rcu_torture_alloc(void)
67418 static void
67419 rcu_torture_free(struct rcu_torture *p)
67420 {
67421- atomic_inc(&n_rcu_torture_free);
67422+ atomic_inc_unchecked(&n_rcu_torture_free);
67423 spin_lock_bh(&rcu_torture_lock);
67424 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
67425 spin_unlock_bh(&rcu_torture_lock);
67426@@ -390,7 +390,7 @@ rcu_torture_cb(struct rcu_head *p)
67427 i = rp->rtort_pipe_count;
67428 if (i > RCU_TORTURE_PIPE_LEN)
67429 i = RCU_TORTURE_PIPE_LEN;
67430- atomic_inc(&rcu_torture_wcount[i]);
67431+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
67432 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67433 rp->rtort_mbtest = 0;
67434 rcu_torture_free(rp);
67435@@ -437,7 +437,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
67436 i = rp->rtort_pipe_count;
67437 if (i > RCU_TORTURE_PIPE_LEN)
67438 i = RCU_TORTURE_PIPE_LEN;
67439- atomic_inc(&rcu_torture_wcount[i]);
67440+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
67441 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67442 rp->rtort_mbtest = 0;
67443 list_del(&rp->rtort_free);
67444@@ -926,7 +926,7 @@ rcu_torture_writer(void *arg)
67445 i = old_rp->rtort_pipe_count;
67446 if (i > RCU_TORTURE_PIPE_LEN)
67447 i = RCU_TORTURE_PIPE_LEN;
67448- atomic_inc(&rcu_torture_wcount[i]);
67449+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
67450 old_rp->rtort_pipe_count++;
67451 cur_ops->deferred_free(old_rp);
67452 }
67453@@ -1007,7 +1007,7 @@ static void rcu_torture_timer(unsigned long unused)
67454 }
67455 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
67456 if (p->rtort_mbtest == 0)
67457- atomic_inc(&n_rcu_torture_mberror);
67458+ atomic_inc_unchecked(&n_rcu_torture_mberror);
67459 spin_lock(&rand_lock);
67460 cur_ops->read_delay(&rand);
67461 n_rcu_torture_timers++;
67462@@ -1071,7 +1071,7 @@ rcu_torture_reader(void *arg)
67463 }
67464 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
67465 if (p->rtort_mbtest == 0)
67466- atomic_inc(&n_rcu_torture_mberror);
67467+ atomic_inc_unchecked(&n_rcu_torture_mberror);
67468 cur_ops->read_delay(&rand);
67469 preempt_disable();
67470 pipe_count = p->rtort_pipe_count;
67471@@ -1133,10 +1133,10 @@ rcu_torture_printk(char *page)
67472 rcu_torture_current,
67473 rcu_torture_current_version,
67474 list_empty(&rcu_torture_freelist),
67475- atomic_read(&n_rcu_torture_alloc),
67476- atomic_read(&n_rcu_torture_alloc_fail),
67477- atomic_read(&n_rcu_torture_free),
67478- atomic_read(&n_rcu_torture_mberror),
67479+ atomic_read_unchecked(&n_rcu_torture_alloc),
67480+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
67481+ atomic_read_unchecked(&n_rcu_torture_free),
67482+ atomic_read_unchecked(&n_rcu_torture_mberror),
67483 n_rcu_torture_boost_ktrerror,
67484 n_rcu_torture_boost_rterror,
67485 n_rcu_torture_boost_failure,
67486@@ -1146,7 +1146,7 @@ rcu_torture_printk(char *page)
67487 n_online_attempts,
67488 n_offline_successes,
67489 n_offline_attempts);
67490- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
67491+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
67492 n_rcu_torture_boost_ktrerror != 0 ||
67493 n_rcu_torture_boost_rterror != 0 ||
67494 n_rcu_torture_boost_failure != 0)
67495@@ -1154,7 +1154,7 @@ rcu_torture_printk(char *page)
67496 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
67497 if (i > 1) {
67498 cnt += sprintf(&page[cnt], "!!! ");
67499- atomic_inc(&n_rcu_torture_error);
67500+ atomic_inc_unchecked(&n_rcu_torture_error);
67501 WARN_ON_ONCE(1);
67502 }
67503 cnt += sprintf(&page[cnt], "Reader Pipe: ");
67504@@ -1168,7 +1168,7 @@ rcu_torture_printk(char *page)
67505 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
67506 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67507 cnt += sprintf(&page[cnt], " %d",
67508- atomic_read(&rcu_torture_wcount[i]));
67509+ atomic_read_unchecked(&rcu_torture_wcount[i]));
67510 }
67511 cnt += sprintf(&page[cnt], "\n");
67512 if (cur_ops->stats)
67513@@ -1676,7 +1676,7 @@ rcu_torture_cleanup(void)
67514
67515 if (cur_ops->cleanup)
67516 cur_ops->cleanup();
67517- if (atomic_read(&n_rcu_torture_error))
67518+ if (atomic_read_unchecked(&n_rcu_torture_error))
67519 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
67520 else if (n_online_successes != n_online_attempts ||
67521 n_offline_successes != n_offline_attempts)
67522@@ -1744,17 +1744,17 @@ rcu_torture_init(void)
67523
67524 rcu_torture_current = NULL;
67525 rcu_torture_current_version = 0;
67526- atomic_set(&n_rcu_torture_alloc, 0);
67527- atomic_set(&n_rcu_torture_alloc_fail, 0);
67528- atomic_set(&n_rcu_torture_free, 0);
67529- atomic_set(&n_rcu_torture_mberror, 0);
67530- atomic_set(&n_rcu_torture_error, 0);
67531+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
67532+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
67533+ atomic_set_unchecked(&n_rcu_torture_free, 0);
67534+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
67535+ atomic_set_unchecked(&n_rcu_torture_error, 0);
67536 n_rcu_torture_boost_ktrerror = 0;
67537 n_rcu_torture_boost_rterror = 0;
67538 n_rcu_torture_boost_failure = 0;
67539 n_rcu_torture_boosts = 0;
67540 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
67541- atomic_set(&rcu_torture_wcount[i], 0);
67542+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
67543 for_each_possible_cpu(cpu) {
67544 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67545 per_cpu(rcu_torture_count, cpu)[i] = 0;
67546diff --git a/kernel/rcutree.c b/kernel/rcutree.c
67547index d0c5baf..109b2e7 100644
67548--- a/kernel/rcutree.c
67549+++ b/kernel/rcutree.c
67550@@ -357,9 +357,9 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
67551 rcu_prepare_for_idle(smp_processor_id());
67552 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67553 smp_mb__before_atomic_inc(); /* See above. */
67554- atomic_inc(&rdtp->dynticks);
67555+ atomic_inc_unchecked(&rdtp->dynticks);
67556 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
67557- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67558+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67559
67560 /*
67561 * The idle task is not permitted to enter the idle loop while
67562@@ -448,10 +448,10 @@ void rcu_irq_exit(void)
67563 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
67564 {
67565 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
67566- atomic_inc(&rdtp->dynticks);
67567+ atomic_inc_unchecked(&rdtp->dynticks);
67568 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67569 smp_mb__after_atomic_inc(); /* See above. */
67570- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67571+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67572 rcu_cleanup_after_idle(smp_processor_id());
67573 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
67574 if (!is_idle_task(current)) {
67575@@ -545,14 +545,14 @@ void rcu_nmi_enter(void)
67576 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
67577
67578 if (rdtp->dynticks_nmi_nesting == 0 &&
67579- (atomic_read(&rdtp->dynticks) & 0x1))
67580+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
67581 return;
67582 rdtp->dynticks_nmi_nesting++;
67583 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
67584- atomic_inc(&rdtp->dynticks);
67585+ atomic_inc_unchecked(&rdtp->dynticks);
67586 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67587 smp_mb__after_atomic_inc(); /* See above. */
67588- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67589+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67590 }
67591
67592 /**
67593@@ -571,9 +571,9 @@ void rcu_nmi_exit(void)
67594 return;
67595 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67596 smp_mb__before_atomic_inc(); /* See above. */
67597- atomic_inc(&rdtp->dynticks);
67598+ atomic_inc_unchecked(&rdtp->dynticks);
67599 smp_mb__after_atomic_inc(); /* Force delay to next write. */
67600- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67601+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67602 }
67603
67604 #ifdef CONFIG_PROVE_RCU
67605@@ -589,7 +589,7 @@ int rcu_is_cpu_idle(void)
67606 int ret;
67607
67608 preempt_disable();
67609- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
67610+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
67611 preempt_enable();
67612 return ret;
67613 }
67614@@ -659,7 +659,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
67615 */
67616 static int dyntick_save_progress_counter(struct rcu_data *rdp)
67617 {
67618- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
67619+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
67620 return (rdp->dynticks_snap & 0x1) == 0;
67621 }
67622
67623@@ -674,7 +674,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
67624 unsigned int curr;
67625 unsigned int snap;
67626
67627- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
67628+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
67629 snap = (unsigned int)rdp->dynticks_snap;
67630
67631 /*
67632@@ -704,10 +704,10 @@ static int jiffies_till_stall_check(void)
67633 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
67634 */
67635 if (till_stall_check < 3) {
67636- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
67637+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
67638 till_stall_check = 3;
67639 } else if (till_stall_check > 300) {
67640- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
67641+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
67642 till_stall_check = 300;
67643 }
67644 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
67645@@ -1766,7 +1766,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
67646 /*
67647 * Do RCU core processing for the current CPU.
67648 */
67649-static void rcu_process_callbacks(struct softirq_action *unused)
67650+static void rcu_process_callbacks(void)
67651 {
67652 trace_rcu_utilization("Start RCU core");
67653 __rcu_process_callbacks(&rcu_sched_state,
67654@@ -1949,8 +1949,8 @@ void synchronize_rcu_bh(void)
67655 }
67656 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
67657
67658-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
67659-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
67660+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
67661+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
67662
67663 static int synchronize_sched_expedited_cpu_stop(void *data)
67664 {
67665@@ -2011,7 +2011,7 @@ void synchronize_sched_expedited(void)
67666 int firstsnap, s, snap, trycount = 0;
67667
67668 /* Note that atomic_inc_return() implies full memory barrier. */
67669- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
67670+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
67671 get_online_cpus();
67672 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
67673
67674@@ -2033,7 +2033,7 @@ void synchronize_sched_expedited(void)
67675 }
67676
67677 /* Check to see if someone else did our work for us. */
67678- s = atomic_read(&sync_sched_expedited_done);
67679+ s = atomic_read_unchecked(&sync_sched_expedited_done);
67680 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
67681 smp_mb(); /* ensure test happens before caller kfree */
67682 return;
67683@@ -2048,7 +2048,7 @@ void synchronize_sched_expedited(void)
67684 * grace period works for us.
67685 */
67686 get_online_cpus();
67687- snap = atomic_read(&sync_sched_expedited_started);
67688+ snap = atomic_read_unchecked(&sync_sched_expedited_started);
67689 smp_mb(); /* ensure read is before try_stop_cpus(). */
67690 }
67691
67692@@ -2059,12 +2059,12 @@ void synchronize_sched_expedited(void)
67693 * than we did beat us to the punch.
67694 */
67695 do {
67696- s = atomic_read(&sync_sched_expedited_done);
67697+ s = atomic_read_unchecked(&sync_sched_expedited_done);
67698 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
67699 smp_mb(); /* ensure test happens before caller kfree */
67700 break;
67701 }
67702- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
67703+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
67704
67705 put_online_cpus();
67706 }
67707@@ -2262,7 +2262,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
67708 rdp->qlen = 0;
67709 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
67710 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
67711- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
67712+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
67713 rdp->cpu = cpu;
67714 rdp->rsp = rsp;
67715 raw_spin_unlock_irqrestore(&rnp->lock, flags);
67716@@ -2290,8 +2290,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
67717 rdp->n_force_qs_snap = rsp->n_force_qs;
67718 rdp->blimit = blimit;
67719 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
67720- atomic_set(&rdp->dynticks->dynticks,
67721- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
67722+ atomic_set_unchecked(&rdp->dynticks->dynticks,
67723+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
67724 rcu_prepare_for_idle_init(cpu);
67725 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
67726
67727diff --git a/kernel/rcutree.h b/kernel/rcutree.h
67728index cdd1be0..5b2efb4 100644
67729--- a/kernel/rcutree.h
67730+++ b/kernel/rcutree.h
67731@@ -87,7 +87,7 @@ struct rcu_dynticks {
67732 long long dynticks_nesting; /* Track irq/process nesting level. */
67733 /* Process level is worth LLONG_MAX/2. */
67734 int dynticks_nmi_nesting; /* Track NMI nesting level. */
67735- atomic_t dynticks; /* Even value for idle, else odd. */
67736+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
67737 };
67738
67739 /* RCU's kthread states for tracing. */
67740diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
67741index c023464..7f57225 100644
67742--- a/kernel/rcutree_plugin.h
67743+++ b/kernel/rcutree_plugin.h
67744@@ -909,7 +909,7 @@ void synchronize_rcu_expedited(void)
67745
67746 /* Clean up and exit. */
67747 smp_mb(); /* ensure expedited GP seen before counter increment. */
67748- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
67749+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
67750 unlock_mb_ret:
67751 mutex_unlock(&sync_rcu_preempt_exp_mutex);
67752 mb_ret:
67753diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
67754index ed459ed..a03c3fa 100644
67755--- a/kernel/rcutree_trace.c
67756+++ b/kernel/rcutree_trace.c
67757@@ -68,7 +68,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
67758 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
67759 rdp->qs_pending);
67760 seq_printf(m, " dt=%d/%llx/%d df=%lu",
67761- atomic_read(&rdp->dynticks->dynticks),
67762+ atomic_read_unchecked(&rdp->dynticks->dynticks),
67763 rdp->dynticks->dynticks_nesting,
67764 rdp->dynticks->dynticks_nmi_nesting,
67765 rdp->dynticks_fqs);
67766@@ -140,7 +140,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
67767 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
67768 rdp->qs_pending);
67769 seq_printf(m, ",%d,%llx,%d,%lu",
67770- atomic_read(&rdp->dynticks->dynticks),
67771+ atomic_read_unchecked(&rdp->dynticks->dynticks),
67772 rdp->dynticks->dynticks_nesting,
67773 rdp->dynticks->dynticks_nmi_nesting,
67774 rdp->dynticks_fqs);
67775diff --git a/kernel/resource.c b/kernel/resource.c
67776index 7e8ea66..1efd11f 100644
67777--- a/kernel/resource.c
67778+++ b/kernel/resource.c
67779@@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
67780
67781 static int __init ioresources_init(void)
67782 {
67783+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67784+#ifdef CONFIG_GRKERNSEC_PROC_USER
67785+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
67786+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
67787+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67788+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
67789+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
67790+#endif
67791+#else
67792 proc_create("ioports", 0, NULL, &proc_ioports_operations);
67793 proc_create("iomem", 0, NULL, &proc_iomem_operations);
67794+#endif
67795 return 0;
67796 }
67797 __initcall(ioresources_init);
67798diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
67799index 98ec494..4241d6d 100644
67800--- a/kernel/rtmutex-tester.c
67801+++ b/kernel/rtmutex-tester.c
67802@@ -20,7 +20,7 @@
67803 #define MAX_RT_TEST_MUTEXES 8
67804
67805 static spinlock_t rttest_lock;
67806-static atomic_t rttest_event;
67807+static atomic_unchecked_t rttest_event;
67808
67809 struct test_thread_data {
67810 int opcode;
67811@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67812
67813 case RTTEST_LOCKCONT:
67814 td->mutexes[td->opdata] = 1;
67815- td->event = atomic_add_return(1, &rttest_event);
67816+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67817 return 0;
67818
67819 case RTTEST_RESET:
67820@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67821 return 0;
67822
67823 case RTTEST_RESETEVENT:
67824- atomic_set(&rttest_event, 0);
67825+ atomic_set_unchecked(&rttest_event, 0);
67826 return 0;
67827
67828 default:
67829@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67830 return ret;
67831
67832 td->mutexes[id] = 1;
67833- td->event = atomic_add_return(1, &rttest_event);
67834+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67835 rt_mutex_lock(&mutexes[id]);
67836- td->event = atomic_add_return(1, &rttest_event);
67837+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67838 td->mutexes[id] = 4;
67839 return 0;
67840
67841@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67842 return ret;
67843
67844 td->mutexes[id] = 1;
67845- td->event = atomic_add_return(1, &rttest_event);
67846+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67847 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
67848- td->event = atomic_add_return(1, &rttest_event);
67849+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67850 td->mutexes[id] = ret ? 0 : 4;
67851 return ret ? -EINTR : 0;
67852
67853@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67854 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
67855 return ret;
67856
67857- td->event = atomic_add_return(1, &rttest_event);
67858+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67859 rt_mutex_unlock(&mutexes[id]);
67860- td->event = atomic_add_return(1, &rttest_event);
67861+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67862 td->mutexes[id] = 0;
67863 return 0;
67864
67865@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67866 break;
67867
67868 td->mutexes[dat] = 2;
67869- td->event = atomic_add_return(1, &rttest_event);
67870+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67871 break;
67872
67873 default:
67874@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67875 return;
67876
67877 td->mutexes[dat] = 3;
67878- td->event = atomic_add_return(1, &rttest_event);
67879+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67880 break;
67881
67882 case RTTEST_LOCKNOWAIT:
67883@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67884 return;
67885
67886 td->mutexes[dat] = 1;
67887- td->event = atomic_add_return(1, &rttest_event);
67888+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67889 return;
67890
67891 default:
67892diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
67893index 0984a21..939f183 100644
67894--- a/kernel/sched/auto_group.c
67895+++ b/kernel/sched/auto_group.c
67896@@ -11,7 +11,7 @@
67897
67898 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
67899 static struct autogroup autogroup_default;
67900-static atomic_t autogroup_seq_nr;
67901+static atomic_unchecked_t autogroup_seq_nr;
67902
67903 void __init autogroup_init(struct task_struct *init_task)
67904 {
67905@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
67906
67907 kref_init(&ag->kref);
67908 init_rwsem(&ag->lock);
67909- ag->id = atomic_inc_return(&autogroup_seq_nr);
67910+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
67911 ag->tg = tg;
67912 #ifdef CONFIG_RT_GROUP_SCHED
67913 /*
67914diff --git a/kernel/sched/core.c b/kernel/sched/core.c
67915index 2000e06..79cf3d8 100644
67916--- a/kernel/sched/core.c
67917+++ b/kernel/sched/core.c
67918@@ -3907,6 +3907,8 @@ int can_nice(const struct task_struct *p, const int nice)
67919 /* convert nice value [19,-20] to rlimit style value [1,40] */
67920 int nice_rlim = 20 - nice;
67921
67922+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
67923+
67924 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
67925 capable(CAP_SYS_NICE));
67926 }
67927@@ -3940,7 +3942,8 @@ SYSCALL_DEFINE1(nice, int, increment)
67928 if (nice > 19)
67929 nice = 19;
67930
67931- if (increment < 0 && !can_nice(current, nice))
67932+ if (increment < 0 && (!can_nice(current, nice) ||
67933+ gr_handle_chroot_nice()))
67934 return -EPERM;
67935
67936 retval = security_task_setnice(current, nice);
67937@@ -4097,6 +4100,7 @@ recheck:
67938 unsigned long rlim_rtprio =
67939 task_rlimit(p, RLIMIT_RTPRIO);
67940
67941+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
67942 /* can't set/change the rt policy */
67943 if (policy != p->policy && !rlim_rtprio)
67944 return -EPERM;
67945diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
67946index e955364..eacd2a4 100644
67947--- a/kernel/sched/fair.c
67948+++ b/kernel/sched/fair.c
67949@@ -5107,7 +5107,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
67950 * run_rebalance_domains is triggered when needed from the scheduler tick.
67951 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
67952 */
67953-static void run_rebalance_domains(struct softirq_action *h)
67954+static void run_rebalance_domains(void)
67955 {
67956 int this_cpu = smp_processor_id();
67957 struct rq *this_rq = cpu_rq(this_cpu);
67958diff --git a/kernel/signal.c b/kernel/signal.c
67959index 17afcaf..4500b05 100644
67960--- a/kernel/signal.c
67961+++ b/kernel/signal.c
67962@@ -47,12 +47,12 @@ static struct kmem_cache *sigqueue_cachep;
67963
67964 int print_fatal_signals __read_mostly;
67965
67966-static void __user *sig_handler(struct task_struct *t, int sig)
67967+static __sighandler_t sig_handler(struct task_struct *t, int sig)
67968 {
67969 return t->sighand->action[sig - 1].sa.sa_handler;
67970 }
67971
67972-static int sig_handler_ignored(void __user *handler, int sig)
67973+static int sig_handler_ignored(__sighandler_t handler, int sig)
67974 {
67975 /* Is it explicitly or implicitly ignored? */
67976 return handler == SIG_IGN ||
67977@@ -61,7 +61,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
67978
67979 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
67980 {
67981- void __user *handler;
67982+ __sighandler_t handler;
67983
67984 handler = sig_handler(t, sig);
67985
67986@@ -365,6 +365,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
67987 atomic_inc(&user->sigpending);
67988 rcu_read_unlock();
67989
67990+ if (!override_rlimit)
67991+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
67992+
67993 if (override_rlimit ||
67994 atomic_read(&user->sigpending) <=
67995 task_rlimit(t, RLIMIT_SIGPENDING)) {
67996@@ -489,7 +492,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
67997
67998 int unhandled_signal(struct task_struct *tsk, int sig)
67999 {
68000- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
68001+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
68002 if (is_global_init(tsk))
68003 return 1;
68004 if (handler != SIG_IGN && handler != SIG_DFL)
68005@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
68006 }
68007 }
68008
68009+ /* allow glibc communication via tgkill to other threads in our
68010+ thread group */
68011+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
68012+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
68013+ && gr_handle_signal(t, sig))
68014+ return -EPERM;
68015+
68016 return security_task_kill(t, info, sig, 0);
68017 }
68018
68019@@ -1204,7 +1214,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68020 return send_signal(sig, info, p, 1);
68021 }
68022
68023-static int
68024+int
68025 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68026 {
68027 return send_signal(sig, info, t, 0);
68028@@ -1241,6 +1251,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68029 unsigned long int flags;
68030 int ret, blocked, ignored;
68031 struct k_sigaction *action;
68032+ int is_unhandled = 0;
68033
68034 spin_lock_irqsave(&t->sighand->siglock, flags);
68035 action = &t->sighand->action[sig-1];
68036@@ -1255,9 +1266,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68037 }
68038 if (action->sa.sa_handler == SIG_DFL)
68039 t->signal->flags &= ~SIGNAL_UNKILLABLE;
68040+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
68041+ is_unhandled = 1;
68042 ret = specific_send_sig_info(sig, info, t);
68043 spin_unlock_irqrestore(&t->sighand->siglock, flags);
68044
68045+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
68046+ normal operation */
68047+ if (is_unhandled) {
68048+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
68049+ gr_handle_crash(t, sig);
68050+ }
68051+
68052 return ret;
68053 }
68054
68055@@ -1324,8 +1344,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68056 ret = check_kill_permission(sig, info, p);
68057 rcu_read_unlock();
68058
68059- if (!ret && sig)
68060+ if (!ret && sig) {
68061 ret = do_send_sig_info(sig, info, p, true);
68062+ if (!ret)
68063+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
68064+ }
68065
68066 return ret;
68067 }
68068@@ -2840,7 +2863,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
68069 int error = -ESRCH;
68070
68071 rcu_read_lock();
68072- p = find_task_by_vpid(pid);
68073+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68074+ /* allow glibc communication via tgkill to other threads in our
68075+ thread group */
68076+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
68077+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
68078+ p = find_task_by_vpid_unrestricted(pid);
68079+ else
68080+#endif
68081+ p = find_task_by_vpid(pid);
68082 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
68083 error = check_kill_permission(sig, info, p);
68084 /*
68085diff --git a/kernel/smp.c b/kernel/smp.c
68086index 2f8b10e..a41bc14 100644
68087--- a/kernel/smp.c
68088+++ b/kernel/smp.c
68089@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
68090 }
68091 EXPORT_SYMBOL(smp_call_function);
68092
68093-void ipi_call_lock(void)
68094+void ipi_call_lock(void) __acquires(call_function.lock)
68095 {
68096 raw_spin_lock(&call_function.lock);
68097 }
68098
68099-void ipi_call_unlock(void)
68100+void ipi_call_unlock(void) __releases(call_function.lock)
68101 {
68102 raw_spin_unlock(&call_function.lock);
68103 }
68104
68105-void ipi_call_lock_irq(void)
68106+void ipi_call_lock_irq(void) __acquires(call_function.lock)
68107 {
68108 raw_spin_lock_irq(&call_function.lock);
68109 }
68110
68111-void ipi_call_unlock_irq(void)
68112+void ipi_call_unlock_irq(void) __releases(call_function.lock)
68113 {
68114 raw_spin_unlock_irq(&call_function.lock);
68115 }
68116diff --git a/kernel/softirq.c b/kernel/softirq.c
68117index 671f959..91c51cb 100644
68118--- a/kernel/softirq.c
68119+++ b/kernel/softirq.c
68120@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
68121
68122 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
68123
68124-char *softirq_to_name[NR_SOFTIRQS] = {
68125+const char * const softirq_to_name[NR_SOFTIRQS] = {
68126 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
68127 "TASKLET", "SCHED", "HRTIMER", "RCU"
68128 };
68129@@ -235,7 +235,7 @@ restart:
68130 kstat_incr_softirqs_this_cpu(vec_nr);
68131
68132 trace_softirq_entry(vec_nr);
68133- h->action(h);
68134+ h->action();
68135 trace_softirq_exit(vec_nr);
68136 if (unlikely(prev_count != preempt_count())) {
68137 printk(KERN_ERR "huh, entered softirq %u %s %p"
68138@@ -381,9 +381,11 @@ void __raise_softirq_irqoff(unsigned int nr)
68139 or_softirq_pending(1UL << nr);
68140 }
68141
68142-void open_softirq(int nr, void (*action)(struct softirq_action *))
68143+void open_softirq(int nr, void (*action)(void))
68144 {
68145- softirq_vec[nr].action = action;
68146+ pax_open_kernel();
68147+ *(void **)&softirq_vec[nr].action = action;
68148+ pax_close_kernel();
68149 }
68150
68151 /*
68152@@ -437,7 +439,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
68153
68154 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
68155
68156-static void tasklet_action(struct softirq_action *a)
68157+static void tasklet_action(void)
68158 {
68159 struct tasklet_struct *list;
68160
68161@@ -472,7 +474,7 @@ static void tasklet_action(struct softirq_action *a)
68162 }
68163 }
68164
68165-static void tasklet_hi_action(struct softirq_action *a)
68166+static void tasklet_hi_action(void)
68167 {
68168 struct tasklet_struct *list;
68169
68170diff --git a/kernel/sys.c b/kernel/sys.c
68171index e7006eb..8fb7c51 100644
68172--- a/kernel/sys.c
68173+++ b/kernel/sys.c
68174@@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
68175 error = -EACCES;
68176 goto out;
68177 }
68178+
68179+ if (gr_handle_chroot_setpriority(p, niceval)) {
68180+ error = -EACCES;
68181+ goto out;
68182+ }
68183+
68184 no_nice = security_task_setnice(p, niceval);
68185 if (no_nice) {
68186 error = no_nice;
68187@@ -581,6 +587,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
68188 goto error;
68189 }
68190
68191+ if (gr_check_group_change(new->gid, new->egid, -1))
68192+ goto error;
68193+
68194 if (rgid != (gid_t) -1 ||
68195 (egid != (gid_t) -1 && egid != old->gid))
68196 new->sgid = new->egid;
68197@@ -610,6 +619,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
68198 old = current_cred();
68199
68200 retval = -EPERM;
68201+
68202+ if (gr_check_group_change(gid, gid, gid))
68203+ goto error;
68204+
68205 if (nsown_capable(CAP_SETGID))
68206 new->gid = new->egid = new->sgid = new->fsgid = gid;
68207 else if (gid == old->gid || gid == old->sgid)
68208@@ -627,7 +640,7 @@ error:
68209 /*
68210 * change the user struct in a credentials set to match the new UID
68211 */
68212-static int set_user(struct cred *new)
68213+int set_user(struct cred *new)
68214 {
68215 struct user_struct *new_user;
68216
68217@@ -697,6 +710,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
68218 goto error;
68219 }
68220
68221+ if (gr_check_user_change(new->uid, new->euid, -1))
68222+ goto error;
68223+
68224 if (new->uid != old->uid) {
68225 retval = set_user(new);
68226 if (retval < 0)
68227@@ -741,6 +757,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
68228 old = current_cred();
68229
68230 retval = -EPERM;
68231+
68232+ if (gr_check_crash_uid(uid))
68233+ goto error;
68234+ if (gr_check_user_change(uid, uid, uid))
68235+ goto error;
68236+
68237 if (nsown_capable(CAP_SETUID)) {
68238 new->suid = new->uid = uid;
68239 if (uid != old->uid) {
68240@@ -795,6 +817,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
68241 goto error;
68242 }
68243
68244+ if (gr_check_user_change(ruid, euid, -1))
68245+ goto error;
68246+
68247 if (ruid != (uid_t) -1) {
68248 new->uid = ruid;
68249 if (ruid != old->uid) {
68250@@ -859,6 +884,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
68251 goto error;
68252 }
68253
68254+ if (gr_check_group_change(rgid, egid, -1))
68255+ goto error;
68256+
68257 if (rgid != (gid_t) -1)
68258 new->gid = rgid;
68259 if (egid != (gid_t) -1)
68260@@ -905,6 +933,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68261 old = current_cred();
68262 old_fsuid = old->fsuid;
68263
68264+ if (gr_check_user_change(-1, -1, uid))
68265+ goto error;
68266+
68267 if (uid == old->uid || uid == old->euid ||
68268 uid == old->suid || uid == old->fsuid ||
68269 nsown_capable(CAP_SETUID)) {
68270@@ -915,6 +946,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68271 }
68272 }
68273
68274+error:
68275 abort_creds(new);
68276 return old_fsuid;
68277
68278@@ -941,12 +973,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
68279 if (gid == old->gid || gid == old->egid ||
68280 gid == old->sgid || gid == old->fsgid ||
68281 nsown_capable(CAP_SETGID)) {
68282+ if (gr_check_group_change(-1, -1, gid))
68283+ goto error;
68284+
68285 if (gid != old_fsgid) {
68286 new->fsgid = gid;
68287 goto change_okay;
68288 }
68289 }
68290
68291+error:
68292 abort_creds(new);
68293 return old_fsgid;
68294
68295@@ -1198,7 +1234,10 @@ static int override_release(char __user *release, int len)
68296 }
68297 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
68298 snprintf(buf, len, "2.6.%u%s", v, rest);
68299- ret = copy_to_user(release, buf, len);
68300+ if (len > sizeof(buf))
68301+ ret = -EFAULT;
68302+ else
68303+ ret = copy_to_user(release, buf, len);
68304 }
68305 return ret;
68306 }
68307@@ -1252,19 +1291,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
68308 return -EFAULT;
68309
68310 down_read(&uts_sem);
68311- error = __copy_to_user(&name->sysname, &utsname()->sysname,
68312+ error = __copy_to_user(name->sysname, &utsname()->sysname,
68313 __OLD_UTS_LEN);
68314 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
68315- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
68316+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
68317 __OLD_UTS_LEN);
68318 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
68319- error |= __copy_to_user(&name->release, &utsname()->release,
68320+ error |= __copy_to_user(name->release, &utsname()->release,
68321 __OLD_UTS_LEN);
68322 error |= __put_user(0, name->release + __OLD_UTS_LEN);
68323- error |= __copy_to_user(&name->version, &utsname()->version,
68324+ error |= __copy_to_user(name->version, &utsname()->version,
68325 __OLD_UTS_LEN);
68326 error |= __put_user(0, name->version + __OLD_UTS_LEN);
68327- error |= __copy_to_user(&name->machine, &utsname()->machine,
68328+ error |= __copy_to_user(name->machine, &utsname()->machine,
68329 __OLD_UTS_LEN);
68330 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
68331 up_read(&uts_sem);
68332@@ -1847,7 +1886,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
68333 error = get_dumpable(me->mm);
68334 break;
68335 case PR_SET_DUMPABLE:
68336- if (arg2 < 0 || arg2 > 1) {
68337+ if (arg2 > 1) {
68338 error = -EINVAL;
68339 break;
68340 }
68341diff --git a/kernel/sysctl.c b/kernel/sysctl.c
68342index 4ab1187..0b75ced 100644
68343--- a/kernel/sysctl.c
68344+++ b/kernel/sysctl.c
68345@@ -91,7 +91,6 @@
68346
68347
68348 #if defined(CONFIG_SYSCTL)
68349-
68350 /* External variables not in a header file. */
68351 extern int sysctl_overcommit_memory;
68352 extern int sysctl_overcommit_ratio;
68353@@ -169,10 +168,8 @@ static int proc_taint(struct ctl_table *table, int write,
68354 void __user *buffer, size_t *lenp, loff_t *ppos);
68355 #endif
68356
68357-#ifdef CONFIG_PRINTK
68358 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
68359 void __user *buffer, size_t *lenp, loff_t *ppos);
68360-#endif
68361
68362 #ifdef CONFIG_MAGIC_SYSRQ
68363 /* Note: sysrq code uses it's own private copy */
68364@@ -196,6 +193,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
68365
68366 #endif
68367
68368+extern struct ctl_table grsecurity_table[];
68369+
68370 static struct ctl_table kern_table[];
68371 static struct ctl_table vm_table[];
68372 static struct ctl_table fs_table[];
68373@@ -210,6 +209,20 @@ extern struct ctl_table epoll_table[];
68374 int sysctl_legacy_va_layout;
68375 #endif
68376
68377+#ifdef CONFIG_PAX_SOFTMODE
68378+static ctl_table pax_table[] = {
68379+ {
68380+ .procname = "softmode",
68381+ .data = &pax_softmode,
68382+ .maxlen = sizeof(unsigned int),
68383+ .mode = 0600,
68384+ .proc_handler = &proc_dointvec,
68385+ },
68386+
68387+ { }
68388+};
68389+#endif
68390+
68391 /* The default sysctl tables: */
68392
68393 static struct ctl_table sysctl_base_table[] = {
68394@@ -256,6 +269,22 @@ static int max_extfrag_threshold = 1000;
68395 #endif
68396
68397 static struct ctl_table kern_table[] = {
68398+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
68399+ {
68400+ .procname = "grsecurity",
68401+ .mode = 0500,
68402+ .child = grsecurity_table,
68403+ },
68404+#endif
68405+
68406+#ifdef CONFIG_PAX_SOFTMODE
68407+ {
68408+ .procname = "pax",
68409+ .mode = 0500,
68410+ .child = pax_table,
68411+ },
68412+#endif
68413+
68414 {
68415 .procname = "sched_child_runs_first",
68416 .data = &sysctl_sched_child_runs_first,
68417@@ -540,7 +569,7 @@ static struct ctl_table kern_table[] = {
68418 .data = &modprobe_path,
68419 .maxlen = KMOD_PATH_LEN,
68420 .mode = 0644,
68421- .proc_handler = proc_dostring,
68422+ .proc_handler = proc_dostring_modpriv,
68423 },
68424 {
68425 .procname = "modules_disabled",
68426@@ -707,16 +736,20 @@ static struct ctl_table kern_table[] = {
68427 .extra1 = &zero,
68428 .extra2 = &one,
68429 },
68430+#endif
68431 {
68432 .procname = "kptr_restrict",
68433 .data = &kptr_restrict,
68434 .maxlen = sizeof(int),
68435 .mode = 0644,
68436 .proc_handler = proc_dointvec_minmax_sysadmin,
68437+#ifdef CONFIG_GRKERNSEC_HIDESYM
68438+ .extra1 = &two,
68439+#else
68440 .extra1 = &zero,
68441+#endif
68442 .extra2 = &two,
68443 },
68444-#endif
68445 {
68446 .procname = "ngroups_max",
68447 .data = &ngroups_max,
68448@@ -1215,6 +1248,13 @@ static struct ctl_table vm_table[] = {
68449 .proc_handler = proc_dointvec_minmax,
68450 .extra1 = &zero,
68451 },
68452+ {
68453+ .procname = "heap_stack_gap",
68454+ .data = &sysctl_heap_stack_gap,
68455+ .maxlen = sizeof(sysctl_heap_stack_gap),
68456+ .mode = 0644,
68457+ .proc_handler = proc_doulongvec_minmax,
68458+ },
68459 #else
68460 {
68461 .procname = "nr_trim_pages",
68462@@ -1645,6 +1685,16 @@ int proc_dostring(struct ctl_table *table, int write,
68463 buffer, lenp, ppos);
68464 }
68465
68466+int proc_dostring_modpriv(struct ctl_table *table, int write,
68467+ void __user *buffer, size_t *lenp, loff_t *ppos)
68468+{
68469+ if (write && !capable(CAP_SYS_MODULE))
68470+ return -EPERM;
68471+
68472+ return _proc_do_string(table->data, table->maxlen, write,
68473+ buffer, lenp, ppos);
68474+}
68475+
68476 static size_t proc_skip_spaces(char **buf)
68477 {
68478 size_t ret;
68479@@ -1750,6 +1800,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
68480 len = strlen(tmp);
68481 if (len > *size)
68482 len = *size;
68483+ if (len > sizeof(tmp))
68484+ len = sizeof(tmp);
68485 if (copy_to_user(*buf, tmp, len))
68486 return -EFAULT;
68487 *size -= len;
68488@@ -1942,7 +1994,6 @@ static int proc_taint(struct ctl_table *table, int write,
68489 return err;
68490 }
68491
68492-#ifdef CONFIG_PRINTK
68493 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
68494 void __user *buffer, size_t *lenp, loff_t *ppos)
68495 {
68496@@ -1951,7 +2002,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
68497
68498 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
68499 }
68500-#endif
68501
68502 struct do_proc_dointvec_minmax_conv_param {
68503 int *min;
68504@@ -2066,8 +2116,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
68505 *i = val;
68506 } else {
68507 val = convdiv * (*i) / convmul;
68508- if (!first)
68509+ if (!first) {
68510 err = proc_put_char(&buffer, &left, '\t');
68511+ if (err)
68512+ break;
68513+ }
68514 err = proc_put_long(&buffer, &left, val, false);
68515 if (err)
68516 break;
68517@@ -2459,6 +2512,12 @@ int proc_dostring(struct ctl_table *table, int write,
68518 return -ENOSYS;
68519 }
68520
68521+int proc_dostring_modpriv(struct ctl_table *table, int write,
68522+ void __user *buffer, size_t *lenp, loff_t *ppos)
68523+{
68524+ return -ENOSYS;
68525+}
68526+
68527 int proc_dointvec(struct ctl_table *table, int write,
68528 void __user *buffer, size_t *lenp, loff_t *ppos)
68529 {
68530@@ -2515,5 +2574,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
68531 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
68532 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
68533 EXPORT_SYMBOL(proc_dostring);
68534+EXPORT_SYMBOL(proc_dostring_modpriv);
68535 EXPORT_SYMBOL(proc_doulongvec_minmax);
68536 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
68537diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
68538index a650694..aaeeb20 100644
68539--- a/kernel/sysctl_binary.c
68540+++ b/kernel/sysctl_binary.c
68541@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
68542 int i;
68543
68544 set_fs(KERNEL_DS);
68545- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68546+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68547 set_fs(old_fs);
68548 if (result < 0)
68549 goto out_kfree;
68550@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
68551 }
68552
68553 set_fs(KERNEL_DS);
68554- result = vfs_write(file, buffer, str - buffer, &pos);
68555+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68556 set_fs(old_fs);
68557 if (result < 0)
68558 goto out_kfree;
68559@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
68560 int i;
68561
68562 set_fs(KERNEL_DS);
68563- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68564+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68565 set_fs(old_fs);
68566 if (result < 0)
68567 goto out_kfree;
68568@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
68569 }
68570
68571 set_fs(KERNEL_DS);
68572- result = vfs_write(file, buffer, str - buffer, &pos);
68573+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68574 set_fs(old_fs);
68575 if (result < 0)
68576 goto out_kfree;
68577@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
68578 int i;
68579
68580 set_fs(KERNEL_DS);
68581- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68582+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68583 set_fs(old_fs);
68584 if (result < 0)
68585 goto out;
68586@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68587 __le16 dnaddr;
68588
68589 set_fs(KERNEL_DS);
68590- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68591+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68592 set_fs(old_fs);
68593 if (result < 0)
68594 goto out;
68595@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68596 le16_to_cpu(dnaddr) & 0x3ff);
68597
68598 set_fs(KERNEL_DS);
68599- result = vfs_write(file, buf, len, &pos);
68600+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
68601 set_fs(old_fs);
68602 if (result < 0)
68603 goto out;
68604diff --git a/kernel/taskstats.c b/kernel/taskstats.c
68605index e660464..c8b9e67 100644
68606--- a/kernel/taskstats.c
68607+++ b/kernel/taskstats.c
68608@@ -27,9 +27,12 @@
68609 #include <linux/cgroup.h>
68610 #include <linux/fs.h>
68611 #include <linux/file.h>
68612+#include <linux/grsecurity.h>
68613 #include <net/genetlink.h>
68614 #include <linux/atomic.h>
68615
68616+extern int gr_is_taskstats_denied(int pid);
68617+
68618 /*
68619 * Maximum length of a cpumask that can be specified in
68620 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
68621@@ -556,6 +559,9 @@ err:
68622
68623 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
68624 {
68625+ if (gr_is_taskstats_denied(current->pid))
68626+ return -EACCES;
68627+
68628 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
68629 return cmd_attr_register_cpumask(info);
68630 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
68631diff --git a/kernel/time.c b/kernel/time.c
68632index ba744cf..267b7c5 100644
68633--- a/kernel/time.c
68634+++ b/kernel/time.c
68635@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
68636 return error;
68637
68638 if (tz) {
68639+ /* we log in do_settimeofday called below, so don't log twice
68640+ */
68641+ if (!tv)
68642+ gr_log_timechange();
68643+
68644 sys_tz = *tz;
68645 update_vsyscall_tz();
68646 if (firsttime) {
68647diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
68648index 8a538c5..def79d4 100644
68649--- a/kernel/time/alarmtimer.c
68650+++ b/kernel/time/alarmtimer.c
68651@@ -779,7 +779,7 @@ static int __init alarmtimer_init(void)
68652 struct platform_device *pdev;
68653 int error = 0;
68654 int i;
68655- struct k_clock alarm_clock = {
68656+ static struct k_clock alarm_clock = {
68657 .clock_getres = alarm_clock_getres,
68658 .clock_get = alarm_clock_get,
68659 .timer_create = alarm_timer_create,
68660diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
68661index f113755..ec24223 100644
68662--- a/kernel/time/tick-broadcast.c
68663+++ b/kernel/time/tick-broadcast.c
68664@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
68665 * then clear the broadcast bit.
68666 */
68667 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
68668- int cpu = smp_processor_id();
68669+ cpu = smp_processor_id();
68670
68671 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
68672 tick_broadcast_clear_oneshot(cpu);
68673diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
68674index d42574df..247414c 100644
68675--- a/kernel/time/timekeeping.c
68676+++ b/kernel/time/timekeeping.c
68677@@ -14,6 +14,7 @@
68678 #include <linux/init.h>
68679 #include <linux/mm.h>
68680 #include <linux/sched.h>
68681+#include <linux/grsecurity.h>
68682 #include <linux/syscore_ops.h>
68683 #include <linux/clocksource.h>
68684 #include <linux/jiffies.h>
68685@@ -373,6 +374,8 @@ int do_settimeofday(const struct timespec *tv)
68686 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
68687 return -EINVAL;
68688
68689+ gr_log_timechange();
68690+
68691 write_seqlock_irqsave(&timekeeper.lock, flags);
68692
68693 timekeeping_forward_now();
68694diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
68695index 3258455..f35227d 100644
68696--- a/kernel/time/timer_list.c
68697+++ b/kernel/time/timer_list.c
68698@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
68699
68700 static void print_name_offset(struct seq_file *m, void *sym)
68701 {
68702+#ifdef CONFIG_GRKERNSEC_HIDESYM
68703+ SEQ_printf(m, "<%p>", NULL);
68704+#else
68705 char symname[KSYM_NAME_LEN];
68706
68707 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
68708 SEQ_printf(m, "<%pK>", sym);
68709 else
68710 SEQ_printf(m, "%s", symname);
68711+#endif
68712 }
68713
68714 static void
68715@@ -112,7 +116,11 @@ next_one:
68716 static void
68717 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
68718 {
68719+#ifdef CONFIG_GRKERNSEC_HIDESYM
68720+ SEQ_printf(m, " .base: %p\n", NULL);
68721+#else
68722 SEQ_printf(m, " .base: %pK\n", base);
68723+#endif
68724 SEQ_printf(m, " .index: %d\n",
68725 base->index);
68726 SEQ_printf(m, " .resolution: %Lu nsecs\n",
68727@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
68728 {
68729 struct proc_dir_entry *pe;
68730
68731+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68732+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
68733+#else
68734 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
68735+#endif
68736 if (!pe)
68737 return -ENOMEM;
68738 return 0;
68739diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
68740index 0b537f2..9e71eca 100644
68741--- a/kernel/time/timer_stats.c
68742+++ b/kernel/time/timer_stats.c
68743@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
68744 static unsigned long nr_entries;
68745 static struct entry entries[MAX_ENTRIES];
68746
68747-static atomic_t overflow_count;
68748+static atomic_unchecked_t overflow_count;
68749
68750 /*
68751 * The entries are in a hash-table, for fast lookup:
68752@@ -140,7 +140,7 @@ static void reset_entries(void)
68753 nr_entries = 0;
68754 memset(entries, 0, sizeof(entries));
68755 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
68756- atomic_set(&overflow_count, 0);
68757+ atomic_set_unchecked(&overflow_count, 0);
68758 }
68759
68760 static struct entry *alloc_entry(void)
68761@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
68762 if (likely(entry))
68763 entry->count++;
68764 else
68765- atomic_inc(&overflow_count);
68766+ atomic_inc_unchecked(&overflow_count);
68767
68768 out_unlock:
68769 raw_spin_unlock_irqrestore(lock, flags);
68770@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
68771
68772 static void print_name_offset(struct seq_file *m, unsigned long addr)
68773 {
68774+#ifdef CONFIG_GRKERNSEC_HIDESYM
68775+ seq_printf(m, "<%p>", NULL);
68776+#else
68777 char symname[KSYM_NAME_LEN];
68778
68779 if (lookup_symbol_name(addr, symname) < 0)
68780 seq_printf(m, "<%p>", (void *)addr);
68781 else
68782 seq_printf(m, "%s", symname);
68783+#endif
68784 }
68785
68786 static int tstats_show(struct seq_file *m, void *v)
68787@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
68788
68789 seq_puts(m, "Timer Stats Version: v0.2\n");
68790 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
68791- if (atomic_read(&overflow_count))
68792+ if (atomic_read_unchecked(&overflow_count))
68793 seq_printf(m, "Overflow: %d entries\n",
68794- atomic_read(&overflow_count));
68795+ atomic_read_unchecked(&overflow_count));
68796
68797 for (i = 0; i < nr_entries; i++) {
68798 entry = entries + i;
68799@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
68800 {
68801 struct proc_dir_entry *pe;
68802
68803+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68804+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
68805+#else
68806 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
68807+#endif
68808 if (!pe)
68809 return -ENOMEM;
68810 return 0;
68811diff --git a/kernel/timer.c b/kernel/timer.c
68812index a297ffc..5e16b0b 100644
68813--- a/kernel/timer.c
68814+++ b/kernel/timer.c
68815@@ -1354,7 +1354,7 @@ void update_process_times(int user_tick)
68816 /*
68817 * This function runs timers and the timer-tq in bottom half context.
68818 */
68819-static void run_timer_softirq(struct softirq_action *h)
68820+static void run_timer_softirq(void)
68821 {
68822 struct tvec_base *base = __this_cpu_read(tvec_bases);
68823
68824diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
68825index c0bd030..62a1927 100644
68826--- a/kernel/trace/blktrace.c
68827+++ b/kernel/trace/blktrace.c
68828@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
68829 struct blk_trace *bt = filp->private_data;
68830 char buf[16];
68831
68832- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
68833+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
68834
68835 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
68836 }
68837@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
68838 return 1;
68839
68840 bt = buf->chan->private_data;
68841- atomic_inc(&bt->dropped);
68842+ atomic_inc_unchecked(&bt->dropped);
68843 return 0;
68844 }
68845
68846@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
68847
68848 bt->dir = dir;
68849 bt->dev = dev;
68850- atomic_set(&bt->dropped, 0);
68851+ atomic_set_unchecked(&bt->dropped, 0);
68852
68853 ret = -EIO;
68854 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
68855diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
68856index 0fa92f6..89950b2 100644
68857--- a/kernel/trace/ftrace.c
68858+++ b/kernel/trace/ftrace.c
68859@@ -1800,12 +1800,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
68860 if (unlikely(ftrace_disabled))
68861 return 0;
68862
68863+ ret = ftrace_arch_code_modify_prepare();
68864+ FTRACE_WARN_ON(ret);
68865+ if (ret)
68866+ return 0;
68867+
68868 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
68869+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
68870 if (ret) {
68871 ftrace_bug(ret, ip);
68872- return 0;
68873 }
68874- return 1;
68875+ return ret ? 0 : 1;
68876 }
68877
68878 /*
68879@@ -2917,7 +2922,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
68880
68881 int
68882 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
68883- void *data)
68884+ void *data)
68885 {
68886 struct ftrace_func_probe *entry;
68887 struct ftrace_page *pg;
68888diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
68889index 2a22255..cdcdd06 100644
68890--- a/kernel/trace/trace.c
68891+++ b/kernel/trace/trace.c
68892@@ -4312,10 +4312,9 @@ static const struct file_operations tracing_dyn_info_fops = {
68893 };
68894 #endif
68895
68896-static struct dentry *d_tracer;
68897-
68898 struct dentry *tracing_init_dentry(void)
68899 {
68900+ static struct dentry *d_tracer;
68901 static int once;
68902
68903 if (d_tracer)
68904@@ -4335,10 +4334,9 @@ struct dentry *tracing_init_dentry(void)
68905 return d_tracer;
68906 }
68907
68908-static struct dentry *d_percpu;
68909-
68910 struct dentry *tracing_dentry_percpu(void)
68911 {
68912+ static struct dentry *d_percpu;
68913 static int once;
68914 struct dentry *d_tracer;
68915
68916diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
68917index 29111da..d190fe2 100644
68918--- a/kernel/trace/trace_events.c
68919+++ b/kernel/trace/trace_events.c
68920@@ -1308,10 +1308,6 @@ static LIST_HEAD(ftrace_module_file_list);
68921 struct ftrace_module_file_ops {
68922 struct list_head list;
68923 struct module *mod;
68924- struct file_operations id;
68925- struct file_operations enable;
68926- struct file_operations format;
68927- struct file_operations filter;
68928 };
68929
68930 static struct ftrace_module_file_ops *
68931@@ -1332,17 +1328,12 @@ trace_create_file_ops(struct module *mod)
68932
68933 file_ops->mod = mod;
68934
68935- file_ops->id = ftrace_event_id_fops;
68936- file_ops->id.owner = mod;
68937-
68938- file_ops->enable = ftrace_enable_fops;
68939- file_ops->enable.owner = mod;
68940-
68941- file_ops->filter = ftrace_event_filter_fops;
68942- file_ops->filter.owner = mod;
68943-
68944- file_ops->format = ftrace_event_format_fops;
68945- file_ops->format.owner = mod;
68946+ pax_open_kernel();
68947+ *(void **)&mod->trace_id.owner = mod;
68948+ *(void **)&mod->trace_enable.owner = mod;
68949+ *(void **)&mod->trace_filter.owner = mod;
68950+ *(void **)&mod->trace_format.owner = mod;
68951+ pax_close_kernel();
68952
68953 list_add(&file_ops->list, &ftrace_module_file_list);
68954
68955@@ -1366,8 +1357,8 @@ static void trace_module_add_events(struct module *mod)
68956
68957 for_each_event(call, start, end) {
68958 __trace_add_event_call(*call, mod,
68959- &file_ops->id, &file_ops->enable,
68960- &file_ops->filter, &file_ops->format);
68961+ &mod->trace_id, &mod->trace_enable,
68962+ &mod->trace_filter, &mod->trace_format);
68963 }
68964 }
68965
68966diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
68967index 580a05e..9b31acb 100644
68968--- a/kernel/trace/trace_kprobe.c
68969+++ b/kernel/trace/trace_kprobe.c
68970@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68971 long ret;
68972 int maxlen = get_rloc_len(*(u32 *)dest);
68973 u8 *dst = get_rloc_data(dest);
68974- u8 *src = addr;
68975+ const u8 __user *src = (const u8 __force_user *)addr;
68976 mm_segment_t old_fs = get_fs();
68977 if (!maxlen)
68978 return;
68979@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68980 pagefault_disable();
68981 do
68982 ret = __copy_from_user_inatomic(dst++, src++, 1);
68983- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
68984+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
68985 dst[-1] = '\0';
68986 pagefault_enable();
68987 set_fs(old_fs);
68988@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68989 ((u8 *)get_rloc_data(dest))[0] = '\0';
68990 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
68991 } else
68992- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
68993+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
68994 get_rloc_offs(*(u32 *)dest));
68995 }
68996 /* Return the length of string -- including null terminal byte */
68997@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
68998 set_fs(KERNEL_DS);
68999 pagefault_disable();
69000 do {
69001- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
69002+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
69003 len++;
69004 } while (c && ret == 0 && len < MAX_STRING_SIZE);
69005 pagefault_enable();
69006diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
69007index fd3c8aa..5f324a6 100644
69008--- a/kernel/trace/trace_mmiotrace.c
69009+++ b/kernel/trace/trace_mmiotrace.c
69010@@ -24,7 +24,7 @@ struct header_iter {
69011 static struct trace_array *mmio_trace_array;
69012 static bool overrun_detected;
69013 static unsigned long prev_overruns;
69014-static atomic_t dropped_count;
69015+static atomic_unchecked_t dropped_count;
69016
69017 static void mmio_reset_data(struct trace_array *tr)
69018 {
69019@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
69020
69021 static unsigned long count_overruns(struct trace_iterator *iter)
69022 {
69023- unsigned long cnt = atomic_xchg(&dropped_count, 0);
69024+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
69025 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
69026
69027 if (over > prev_overruns)
69028@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
69029 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
69030 sizeof(*entry), 0, pc);
69031 if (!event) {
69032- atomic_inc(&dropped_count);
69033+ atomic_inc_unchecked(&dropped_count);
69034 return;
69035 }
69036 entry = ring_buffer_event_data(event);
69037@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
69038 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
69039 sizeof(*entry), 0, pc);
69040 if (!event) {
69041- atomic_inc(&dropped_count);
69042+ atomic_inc_unchecked(&dropped_count);
69043 return;
69044 }
69045 entry = ring_buffer_event_data(event);
69046diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
69047index df611a0..10d8b32 100644
69048--- a/kernel/trace/trace_output.c
69049+++ b/kernel/trace/trace_output.c
69050@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
69051
69052 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
69053 if (!IS_ERR(p)) {
69054- p = mangle_path(s->buffer + s->len, p, "\n");
69055+ p = mangle_path(s->buffer + s->len, p, "\n\\");
69056 if (p) {
69057 s->len = p - s->buffer;
69058 return 1;
69059diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
69060index d4545f4..a9010a1 100644
69061--- a/kernel/trace/trace_stack.c
69062+++ b/kernel/trace/trace_stack.c
69063@@ -53,7 +53,7 @@ static inline void check_stack(void)
69064 return;
69065
69066 /* we do not handle interrupt stacks yet */
69067- if (!object_is_on_stack(&this_size))
69068+ if (!object_starts_on_stack(&this_size))
69069 return;
69070
69071 local_irq_save(flags);
69072diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
69073index 209b379..7f76423 100644
69074--- a/kernel/trace/trace_workqueue.c
69075+++ b/kernel/trace/trace_workqueue.c
69076@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
69077 int cpu;
69078 pid_t pid;
69079 /* Can be inserted from interrupt or user context, need to be atomic */
69080- atomic_t inserted;
69081+ atomic_unchecked_t inserted;
69082 /*
69083 * Don't need to be atomic, works are serialized in a single workqueue thread
69084 * on a single CPU.
69085@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
69086 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
69087 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
69088 if (node->pid == wq_thread->pid) {
69089- atomic_inc(&node->inserted);
69090+ atomic_inc_unchecked(&node->inserted);
69091 goto found;
69092 }
69093 }
69094@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
69095 tsk = get_pid_task(pid, PIDTYPE_PID);
69096 if (tsk) {
69097 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
69098- atomic_read(&cws->inserted), cws->executed,
69099+ atomic_read_unchecked(&cws->inserted), cws->executed,
69100 tsk->comm);
69101 put_task_struct(tsk);
69102 }
69103diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
69104index 6777153..8519f60 100644
69105--- a/lib/Kconfig.debug
69106+++ b/lib/Kconfig.debug
69107@@ -1132,6 +1132,7 @@ config LATENCYTOP
69108 depends on DEBUG_KERNEL
69109 depends on STACKTRACE_SUPPORT
69110 depends on PROC_FS
69111+ depends on !GRKERNSEC_HIDESYM
69112 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
69113 select KALLSYMS
69114 select KALLSYMS_ALL
69115diff --git a/lib/bitmap.c b/lib/bitmap.c
69116index b5a8b6a..a69623c 100644
69117--- a/lib/bitmap.c
69118+++ b/lib/bitmap.c
69119@@ -421,7 +421,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
69120 {
69121 int c, old_c, totaldigits, ndigits, nchunks, nbits;
69122 u32 chunk;
69123- const char __user __force *ubuf = (const char __user __force *)buf;
69124+ const char __user *ubuf = (const char __force_user *)buf;
69125
69126 bitmap_zero(maskp, nmaskbits);
69127
69128@@ -506,7 +506,7 @@ int bitmap_parse_user(const char __user *ubuf,
69129 {
69130 if (!access_ok(VERIFY_READ, ubuf, ulen))
69131 return -EFAULT;
69132- return __bitmap_parse((const char __force *)ubuf,
69133+ return __bitmap_parse((const char __force_kernel *)ubuf,
69134 ulen, 1, maskp, nmaskbits);
69135
69136 }
69137@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
69138 {
69139 unsigned a, b;
69140 int c, old_c, totaldigits;
69141- const char __user __force *ubuf = (const char __user __force *)buf;
69142+ const char __user *ubuf = (const char __force_user *)buf;
69143 int exp_digit, in_range;
69144
69145 totaldigits = c = 0;
69146@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
69147 {
69148 if (!access_ok(VERIFY_READ, ubuf, ulen))
69149 return -EFAULT;
69150- return __bitmap_parselist((const char __force *)ubuf,
69151+ return __bitmap_parselist((const char __force_kernel *)ubuf,
69152 ulen, 1, maskp, nmaskbits);
69153 }
69154 EXPORT_SYMBOL(bitmap_parselist_user);
69155diff --git a/lib/bug.c b/lib/bug.c
69156index a28c141..2bd3d95 100644
69157--- a/lib/bug.c
69158+++ b/lib/bug.c
69159@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
69160 return BUG_TRAP_TYPE_NONE;
69161
69162 bug = find_bug(bugaddr);
69163+ if (!bug)
69164+ return BUG_TRAP_TYPE_NONE;
69165
69166 file = NULL;
69167 line = 0;
69168diff --git a/lib/debugobjects.c b/lib/debugobjects.c
69169index 0ab9ae8..f01ceca 100644
69170--- a/lib/debugobjects.c
69171+++ b/lib/debugobjects.c
69172@@ -288,7 +288,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
69173 if (limit > 4)
69174 return;
69175
69176- is_on_stack = object_is_on_stack(addr);
69177+ is_on_stack = object_starts_on_stack(addr);
69178 if (is_on_stack == onstack)
69179 return;
69180
69181diff --git a/lib/devres.c b/lib/devres.c
69182index 80b9c76..9e32279 100644
69183--- a/lib/devres.c
69184+++ b/lib/devres.c
69185@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
69186 void devm_iounmap(struct device *dev, void __iomem *addr)
69187 {
69188 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
69189- (void *)addr));
69190+ (void __force *)addr));
69191 iounmap(addr);
69192 }
69193 EXPORT_SYMBOL(devm_iounmap);
69194@@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
69195 {
69196 ioport_unmap(addr);
69197 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
69198- devm_ioport_map_match, (void *)addr));
69199+ devm_ioport_map_match, (void __force *)addr));
69200 }
69201 EXPORT_SYMBOL(devm_ioport_unmap);
69202
69203diff --git a/lib/dma-debug.c b/lib/dma-debug.c
69204index 13ef233..5241683 100644
69205--- a/lib/dma-debug.c
69206+++ b/lib/dma-debug.c
69207@@ -924,7 +924,7 @@ out:
69208
69209 static void check_for_stack(struct device *dev, void *addr)
69210 {
69211- if (object_is_on_stack(addr))
69212+ if (object_starts_on_stack(addr))
69213 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
69214 "stack [addr=%p]\n", addr);
69215 }
69216diff --git a/lib/extable.c b/lib/extable.c
69217index 4cac81e..63e9b8f 100644
69218--- a/lib/extable.c
69219+++ b/lib/extable.c
69220@@ -13,6 +13,7 @@
69221 #include <linux/init.h>
69222 #include <linux/sort.h>
69223 #include <asm/uaccess.h>
69224+#include <asm/pgtable.h>
69225
69226 #ifndef ARCH_HAS_SORT_EXTABLE
69227 /*
69228@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
69229 void sort_extable(struct exception_table_entry *start,
69230 struct exception_table_entry *finish)
69231 {
69232+ pax_open_kernel();
69233 sort(start, finish - start, sizeof(struct exception_table_entry),
69234 cmp_ex, NULL);
69235+ pax_close_kernel();
69236 }
69237
69238 #ifdef CONFIG_MODULES
69239diff --git a/lib/inflate.c b/lib/inflate.c
69240index 013a761..c28f3fc 100644
69241--- a/lib/inflate.c
69242+++ b/lib/inflate.c
69243@@ -269,7 +269,7 @@ static void free(void *where)
69244 malloc_ptr = free_mem_ptr;
69245 }
69246 #else
69247-#define malloc(a) kmalloc(a, GFP_KERNEL)
69248+#define malloc(a) kmalloc((a), GFP_KERNEL)
69249 #define free(a) kfree(a)
69250 #endif
69251
69252diff --git a/lib/ioremap.c b/lib/ioremap.c
69253index 0c9216c..863bd89 100644
69254--- a/lib/ioremap.c
69255+++ b/lib/ioremap.c
69256@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
69257 unsigned long next;
69258
69259 phys_addr -= addr;
69260- pmd = pmd_alloc(&init_mm, pud, addr);
69261+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
69262 if (!pmd)
69263 return -ENOMEM;
69264 do {
69265@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
69266 unsigned long next;
69267
69268 phys_addr -= addr;
69269- pud = pud_alloc(&init_mm, pgd, addr);
69270+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
69271 if (!pud)
69272 return -ENOMEM;
69273 do {
69274diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
69275index bd2bea9..6b3c95e 100644
69276--- a/lib/is_single_threaded.c
69277+++ b/lib/is_single_threaded.c
69278@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
69279 struct task_struct *p, *t;
69280 bool ret;
69281
69282+ if (!mm)
69283+ return true;
69284+
69285 if (atomic_read(&task->signal->live) != 1)
69286 return false;
69287
69288diff --git a/lib/radix-tree.c b/lib/radix-tree.c
69289index 3ac50dc..240bb7e 100644
69290--- a/lib/radix-tree.c
69291+++ b/lib/radix-tree.c
69292@@ -79,7 +79,7 @@ struct radix_tree_preload {
69293 int nr;
69294 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
69295 };
69296-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
69297+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
69298
69299 static inline void *ptr_to_indirect(void *ptr)
69300 {
69301diff --git a/lib/vsprintf.c b/lib/vsprintf.c
69302index abbabec..362988d 100644
69303--- a/lib/vsprintf.c
69304+++ b/lib/vsprintf.c
69305@@ -16,6 +16,9 @@
69306 * - scnprintf and vscnprintf
69307 */
69308
69309+#ifdef CONFIG_GRKERNSEC_HIDESYM
69310+#define __INCLUDED_BY_HIDESYM 1
69311+#endif
69312 #include <stdarg.h>
69313 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
69314 #include <linux/types.h>
69315@@ -433,7 +436,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
69316 char sym[KSYM_SYMBOL_LEN];
69317 if (ext == 'B')
69318 sprint_backtrace(sym, value);
69319- else if (ext != 'f' && ext != 's')
69320+ else if (ext != 'f' && ext != 's' && ext != 'a')
69321 sprint_symbol(sym, value);
69322 else
69323 kallsyms_lookup(value, NULL, NULL, NULL, sym);
69324@@ -809,7 +812,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
69325 return number(buf, end, *(const netdev_features_t *)addr, spec);
69326 }
69327
69328+#ifdef CONFIG_GRKERNSEC_HIDESYM
69329+int kptr_restrict __read_mostly = 2;
69330+#else
69331 int kptr_restrict __read_mostly;
69332+#endif
69333
69334 /*
69335 * Show a '%p' thing. A kernel extension is that the '%p' is followed
69336@@ -823,6 +830,8 @@ int kptr_restrict __read_mostly;
69337 * - 'S' For symbolic direct pointers with offset
69338 * - 's' For symbolic direct pointers without offset
69339 * - 'B' For backtraced symbolic direct pointers with offset
69340+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
69341+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
69342 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
69343 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
69344 * - 'M' For a 6-byte MAC address, it prints the address in the
69345@@ -868,12 +877,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69346 {
69347 if (!ptr && *fmt != 'K') {
69348 /*
69349- * Print (null) with the same width as a pointer so it makes
69350+ * Print (nil) with the same width as a pointer so it makes
69351 * tabular output look nice.
69352 */
69353 if (spec.field_width == -1)
69354 spec.field_width = 2 * sizeof(void *);
69355- return string(buf, end, "(null)", spec);
69356+ return string(buf, end, "(nil)", spec);
69357 }
69358
69359 switch (*fmt) {
69360@@ -883,6 +892,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69361 /* Fallthrough */
69362 case 'S':
69363 case 's':
69364+#ifdef CONFIG_GRKERNSEC_HIDESYM
69365+ break;
69366+#else
69367+ return symbol_string(buf, end, ptr, spec, *fmt);
69368+#endif
69369+ case 'A':
69370+ case 'a':
69371 case 'B':
69372 return symbol_string(buf, end, ptr, spec, *fmt);
69373 case 'R':
69374@@ -1653,11 +1669,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69375 typeof(type) value; \
69376 if (sizeof(type) == 8) { \
69377 args = PTR_ALIGN(args, sizeof(u32)); \
69378- *(u32 *)&value = *(u32 *)args; \
69379- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
69380+ *(u32 *)&value = *(const u32 *)args; \
69381+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
69382 } else { \
69383 args = PTR_ALIGN(args, sizeof(type)); \
69384- value = *(typeof(type) *)args; \
69385+ value = *(const typeof(type) *)args; \
69386 } \
69387 args += sizeof(type); \
69388 value; \
69389@@ -1720,7 +1736,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69390 case FORMAT_TYPE_STR: {
69391 const char *str_arg = args;
69392 args += strlen(str_arg) + 1;
69393- str = string(str, end, (char *)str_arg, spec);
69394+ str = string(str, end, str_arg, spec);
69395 break;
69396 }
69397
69398diff --git a/localversion-grsec b/localversion-grsec
69399new file mode 100644
69400index 0000000..7cd6065
69401--- /dev/null
69402+++ b/localversion-grsec
69403@@ -0,0 +1 @@
69404+-grsec
69405diff --git a/mm/Kconfig b/mm/Kconfig
69406index e338407..4210331 100644
69407--- a/mm/Kconfig
69408+++ b/mm/Kconfig
69409@@ -247,10 +247,10 @@ config KSM
69410 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
69411
69412 config DEFAULT_MMAP_MIN_ADDR
69413- int "Low address space to protect from user allocation"
69414+ int "Low address space to protect from user allocation"
69415 depends on MMU
69416- default 4096
69417- help
69418+ default 65536
69419+ help
69420 This is the portion of low virtual memory which should be protected
69421 from userspace allocation. Keeping a user from writing to low pages
69422 can help reduce the impact of kernel NULL pointer bugs.
69423@@ -280,7 +280,7 @@ config MEMORY_FAILURE
69424
69425 config HWPOISON_INJECT
69426 tristate "HWPoison pages injector"
69427- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
69428+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
69429 select PROC_PAGE_MONITOR
69430
69431 config NOMMU_INITIAL_TRIM_EXCESS
69432diff --git a/mm/filemap.c b/mm/filemap.c
69433index 79c4b2b..596b417 100644
69434--- a/mm/filemap.c
69435+++ b/mm/filemap.c
69436@@ -1762,7 +1762,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
69437 struct address_space *mapping = file->f_mapping;
69438
69439 if (!mapping->a_ops->readpage)
69440- return -ENOEXEC;
69441+ return -ENODEV;
69442 file_accessed(file);
69443 vma->vm_ops = &generic_file_vm_ops;
69444 vma->vm_flags |= VM_CAN_NONLINEAR;
69445@@ -2168,6 +2168,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
69446 *pos = i_size_read(inode);
69447
69448 if (limit != RLIM_INFINITY) {
69449+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
69450 if (*pos >= limit) {
69451 send_sig(SIGXFSZ, current, 0);
69452 return -EFBIG;
69453diff --git a/mm/fremap.c b/mm/fremap.c
69454index 9ed4fd4..c42648d 100644
69455--- a/mm/fremap.c
69456+++ b/mm/fremap.c
69457@@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
69458 retry:
69459 vma = find_vma(mm, start);
69460
69461+#ifdef CONFIG_PAX_SEGMEXEC
69462+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
69463+ goto out;
69464+#endif
69465+
69466 /*
69467 * Make sure the vma is shared, that it supports prefaulting,
69468 * and that the remapped range is valid and fully within
69469diff --git a/mm/highmem.c b/mm/highmem.c
69470index 57d82c6..e9e0552 100644
69471--- a/mm/highmem.c
69472+++ b/mm/highmem.c
69473@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
69474 * So no dangers, even with speculative execution.
69475 */
69476 page = pte_page(pkmap_page_table[i]);
69477+ pax_open_kernel();
69478 pte_clear(&init_mm, (unsigned long)page_address(page),
69479 &pkmap_page_table[i]);
69480-
69481+ pax_close_kernel();
69482 set_page_address(page, NULL);
69483 need_flush = 1;
69484 }
69485@@ -186,9 +187,11 @@ start:
69486 }
69487 }
69488 vaddr = PKMAP_ADDR(last_pkmap_nr);
69489+
69490+ pax_open_kernel();
69491 set_pte_at(&init_mm, vaddr,
69492 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
69493-
69494+ pax_close_kernel();
69495 pkmap_count[last_pkmap_nr] = 1;
69496 set_page_address(page, (void *)vaddr);
69497
69498diff --git a/mm/huge_memory.c b/mm/huge_memory.c
69499index f0e5306..cb9398e 100644
69500--- a/mm/huge_memory.c
69501+++ b/mm/huge_memory.c
69502@@ -733,7 +733,7 @@ out:
69503 * run pte_offset_map on the pmd, if an huge pmd could
69504 * materialize from under us from a different thread.
69505 */
69506- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
69507+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
69508 return VM_FAULT_OOM;
69509 /* if an huge pmd materialized from under us just retry later */
69510 if (unlikely(pmd_trans_huge(*pmd)))
69511diff --git a/mm/hugetlb.c b/mm/hugetlb.c
69512index 263e177..3f36aec 100644
69513--- a/mm/hugetlb.c
69514+++ b/mm/hugetlb.c
69515@@ -2446,6 +2446,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
69516 return 1;
69517 }
69518
69519+#ifdef CONFIG_PAX_SEGMEXEC
69520+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
69521+{
69522+ struct mm_struct *mm = vma->vm_mm;
69523+ struct vm_area_struct *vma_m;
69524+ unsigned long address_m;
69525+ pte_t *ptep_m;
69526+
69527+ vma_m = pax_find_mirror_vma(vma);
69528+ if (!vma_m)
69529+ return;
69530+
69531+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69532+ address_m = address + SEGMEXEC_TASK_SIZE;
69533+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
69534+ get_page(page_m);
69535+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
69536+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
69537+}
69538+#endif
69539+
69540 /*
69541 * Hugetlb_cow() should be called with page lock of the original hugepage held.
69542 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
69543@@ -2558,6 +2579,11 @@ retry_avoidcopy:
69544 make_huge_pte(vma, new_page, 1));
69545 page_remove_rmap(old_page);
69546 hugepage_add_new_anon_rmap(new_page, vma, address);
69547+
69548+#ifdef CONFIG_PAX_SEGMEXEC
69549+ pax_mirror_huge_pte(vma, address, new_page);
69550+#endif
69551+
69552 /* Make the old page be freed below */
69553 new_page = old_page;
69554 mmu_notifier_invalidate_range_end(mm,
69555@@ -2712,6 +2738,10 @@ retry:
69556 && (vma->vm_flags & VM_SHARED)));
69557 set_huge_pte_at(mm, address, ptep, new_pte);
69558
69559+#ifdef CONFIG_PAX_SEGMEXEC
69560+ pax_mirror_huge_pte(vma, address, page);
69561+#endif
69562+
69563 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
69564 /* Optimization, do the COW without a second fault */
69565 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
69566@@ -2741,6 +2771,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69567 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
69568 struct hstate *h = hstate_vma(vma);
69569
69570+#ifdef CONFIG_PAX_SEGMEXEC
69571+ struct vm_area_struct *vma_m;
69572+#endif
69573+
69574 address &= huge_page_mask(h);
69575
69576 ptep = huge_pte_offset(mm, address);
69577@@ -2754,6 +2788,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69578 VM_FAULT_SET_HINDEX(h - hstates);
69579 }
69580
69581+#ifdef CONFIG_PAX_SEGMEXEC
69582+ vma_m = pax_find_mirror_vma(vma);
69583+ if (vma_m) {
69584+ unsigned long address_m;
69585+
69586+ if (vma->vm_start > vma_m->vm_start) {
69587+ address_m = address;
69588+ address -= SEGMEXEC_TASK_SIZE;
69589+ vma = vma_m;
69590+ h = hstate_vma(vma);
69591+ } else
69592+ address_m = address + SEGMEXEC_TASK_SIZE;
69593+
69594+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
69595+ return VM_FAULT_OOM;
69596+ address_m &= HPAGE_MASK;
69597+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
69598+ }
69599+#endif
69600+
69601 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
69602 if (!ptep)
69603 return VM_FAULT_OOM;
69604diff --git a/mm/internal.h b/mm/internal.h
69605index 2189af4..f2ca332 100644
69606--- a/mm/internal.h
69607+++ b/mm/internal.h
69608@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
69609 * in mm/page_alloc.c
69610 */
69611 extern void __free_pages_bootmem(struct page *page, unsigned int order);
69612+extern void free_compound_page(struct page *page);
69613 extern void prep_compound_page(struct page *page, unsigned long order);
69614 #ifdef CONFIG_MEMORY_FAILURE
69615 extern bool is_free_buddy_page(struct page *page);
69616diff --git a/mm/kmemleak.c b/mm/kmemleak.c
69617index 45eb621..6ccd8ea 100644
69618--- a/mm/kmemleak.c
69619+++ b/mm/kmemleak.c
69620@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
69621
69622 for (i = 0; i < object->trace_len; i++) {
69623 void *ptr = (void *)object->trace[i];
69624- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
69625+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
69626 }
69627 }
69628
69629diff --git a/mm/maccess.c b/mm/maccess.c
69630index d53adf9..03a24bf 100644
69631--- a/mm/maccess.c
69632+++ b/mm/maccess.c
69633@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
69634 set_fs(KERNEL_DS);
69635 pagefault_disable();
69636 ret = __copy_from_user_inatomic(dst,
69637- (__force const void __user *)src, size);
69638+ (const void __force_user *)src, size);
69639 pagefault_enable();
69640 set_fs(old_fs);
69641
69642@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
69643
69644 set_fs(KERNEL_DS);
69645 pagefault_disable();
69646- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
69647+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
69648 pagefault_enable();
69649 set_fs(old_fs);
69650
69651diff --git a/mm/madvise.c b/mm/madvise.c
69652index 1ccbba5..79e16f9 100644
69653--- a/mm/madvise.c
69654+++ b/mm/madvise.c
69655@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
69656 pgoff_t pgoff;
69657 unsigned long new_flags = vma->vm_flags;
69658
69659+#ifdef CONFIG_PAX_SEGMEXEC
69660+ struct vm_area_struct *vma_m;
69661+#endif
69662+
69663 switch (behavior) {
69664 case MADV_NORMAL:
69665 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
69666@@ -116,6 +120,13 @@ success:
69667 /*
69668 * vm_flags is protected by the mmap_sem held in write mode.
69669 */
69670+
69671+#ifdef CONFIG_PAX_SEGMEXEC
69672+ vma_m = pax_find_mirror_vma(vma);
69673+ if (vma_m)
69674+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
69675+#endif
69676+
69677 vma->vm_flags = new_flags;
69678
69679 out:
69680@@ -174,6 +185,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
69681 struct vm_area_struct ** prev,
69682 unsigned long start, unsigned long end)
69683 {
69684+
69685+#ifdef CONFIG_PAX_SEGMEXEC
69686+ struct vm_area_struct *vma_m;
69687+#endif
69688+
69689 *prev = vma;
69690 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
69691 return -EINVAL;
69692@@ -186,6 +202,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
69693 zap_page_range(vma, start, end - start, &details);
69694 } else
69695 zap_page_range(vma, start, end - start, NULL);
69696+
69697+#ifdef CONFIG_PAX_SEGMEXEC
69698+ vma_m = pax_find_mirror_vma(vma);
69699+ if (vma_m) {
69700+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
69701+ struct zap_details details = {
69702+ .nonlinear_vma = vma_m,
69703+ .last_index = ULONG_MAX,
69704+ };
69705+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
69706+ } else
69707+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
69708+ }
69709+#endif
69710+
69711 return 0;
69712 }
69713
69714@@ -384,6 +415,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
69715 if (end < start)
69716 goto out;
69717
69718+#ifdef CONFIG_PAX_SEGMEXEC
69719+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69720+ if (end > SEGMEXEC_TASK_SIZE)
69721+ goto out;
69722+ } else
69723+#endif
69724+
69725+ if (end > TASK_SIZE)
69726+ goto out;
69727+
69728 error = 0;
69729 if (end == start)
69730 goto out;
69731diff --git a/mm/memory-failure.c b/mm/memory-failure.c
69732index 97cc273..6ed703f 100644
69733--- a/mm/memory-failure.c
69734+++ b/mm/memory-failure.c
69735@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
69736
69737 int sysctl_memory_failure_recovery __read_mostly = 1;
69738
69739-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
69740+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
69741
69742 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
69743
69744@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
69745 pfn, t->comm, t->pid);
69746 si.si_signo = SIGBUS;
69747 si.si_errno = 0;
69748- si.si_addr = (void *)addr;
69749+ si.si_addr = (void __user *)addr;
69750 #ifdef __ARCH_SI_TRAPNO
69751 si.si_trapno = trapno;
69752 #endif
69753@@ -1036,7 +1036,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
69754 }
69755
69756 nr_pages = 1 << compound_trans_order(hpage);
69757- atomic_long_add(nr_pages, &mce_bad_pages);
69758+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
69759
69760 /*
69761 * We need/can do nothing about count=0 pages.
69762@@ -1066,7 +1066,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
69763 if (!PageHWPoison(hpage)
69764 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
69765 || (p != hpage && TestSetPageHWPoison(hpage))) {
69766- atomic_long_sub(nr_pages, &mce_bad_pages);
69767+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69768 return 0;
69769 }
69770 set_page_hwpoison_huge_page(hpage);
69771@@ -1124,7 +1124,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
69772 }
69773 if (hwpoison_filter(p)) {
69774 if (TestClearPageHWPoison(p))
69775- atomic_long_sub(nr_pages, &mce_bad_pages);
69776+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69777 unlock_page(hpage);
69778 put_page(hpage);
69779 return 0;
69780@@ -1319,7 +1319,7 @@ int unpoison_memory(unsigned long pfn)
69781 return 0;
69782 }
69783 if (TestClearPageHWPoison(p))
69784- atomic_long_sub(nr_pages, &mce_bad_pages);
69785+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69786 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
69787 return 0;
69788 }
69789@@ -1333,7 +1333,7 @@ int unpoison_memory(unsigned long pfn)
69790 */
69791 if (TestClearPageHWPoison(page)) {
69792 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
69793- atomic_long_sub(nr_pages, &mce_bad_pages);
69794+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69795 freeit = 1;
69796 if (PageHuge(page))
69797 clear_page_hwpoison_huge_page(page);
69798@@ -1446,7 +1446,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
69799 }
69800 done:
69801 if (!PageHWPoison(hpage))
69802- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
69803+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
69804 set_page_hwpoison_huge_page(hpage);
69805 dequeue_hwpoisoned_huge_page(hpage);
69806 /* keep elevated page count for bad page */
69807@@ -1577,7 +1577,7 @@ int soft_offline_page(struct page *page, int flags)
69808 return ret;
69809
69810 done:
69811- atomic_long_add(1, &mce_bad_pages);
69812+ atomic_long_add_unchecked(1, &mce_bad_pages);
69813 SetPageHWPoison(page);
69814 /* keep elevated page count for bad page */
69815 return ret;
69816diff --git a/mm/memory.c b/mm/memory.c
69817index 6105f47..3363489 100644
69818--- a/mm/memory.c
69819+++ b/mm/memory.c
69820@@ -434,8 +434,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
69821 return;
69822
69823 pmd = pmd_offset(pud, start);
69824+
69825+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
69826 pud_clear(pud);
69827 pmd_free_tlb(tlb, pmd, start);
69828+#endif
69829+
69830 }
69831
69832 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
69833@@ -466,9 +470,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
69834 if (end - 1 > ceiling - 1)
69835 return;
69836
69837+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
69838 pud = pud_offset(pgd, start);
69839 pgd_clear(pgd);
69840 pud_free_tlb(tlb, pud, start);
69841+#endif
69842+
69843 }
69844
69845 /*
69846@@ -1597,12 +1604,6 @@ no_page_table:
69847 return page;
69848 }
69849
69850-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
69851-{
69852- return stack_guard_page_start(vma, addr) ||
69853- stack_guard_page_end(vma, addr+PAGE_SIZE);
69854-}
69855-
69856 /**
69857 * __get_user_pages() - pin user pages in memory
69858 * @tsk: task_struct of target task
69859@@ -1675,10 +1676,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69860 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
69861 i = 0;
69862
69863- do {
69864+ while (nr_pages) {
69865 struct vm_area_struct *vma;
69866
69867- vma = find_extend_vma(mm, start);
69868+ vma = find_vma(mm, start);
69869 if (!vma && in_gate_area(mm, start)) {
69870 unsigned long pg = start & PAGE_MASK;
69871 pgd_t *pgd;
69872@@ -1726,7 +1727,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69873 goto next_page;
69874 }
69875
69876- if (!vma ||
69877+ if (!vma || start < vma->vm_start ||
69878 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
69879 !(vm_flags & vma->vm_flags))
69880 return i ? : -EFAULT;
69881@@ -1753,11 +1754,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69882 int ret;
69883 unsigned int fault_flags = 0;
69884
69885- /* For mlock, just skip the stack guard page. */
69886- if (foll_flags & FOLL_MLOCK) {
69887- if (stack_guard_page(vma, start))
69888- goto next_page;
69889- }
69890 if (foll_flags & FOLL_WRITE)
69891 fault_flags |= FAULT_FLAG_WRITE;
69892 if (nonblocking)
69893@@ -1831,7 +1827,7 @@ next_page:
69894 start += PAGE_SIZE;
69895 nr_pages--;
69896 } while (nr_pages && start < vma->vm_end);
69897- } while (nr_pages);
69898+ }
69899 return i;
69900 }
69901 EXPORT_SYMBOL(__get_user_pages);
69902@@ -2038,6 +2034,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
69903 page_add_file_rmap(page);
69904 set_pte_at(mm, addr, pte, mk_pte(page, prot));
69905
69906+#ifdef CONFIG_PAX_SEGMEXEC
69907+ pax_mirror_file_pte(vma, addr, page, ptl);
69908+#endif
69909+
69910 retval = 0;
69911 pte_unmap_unlock(pte, ptl);
69912 return retval;
69913@@ -2072,10 +2072,22 @@ out:
69914 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
69915 struct page *page)
69916 {
69917+
69918+#ifdef CONFIG_PAX_SEGMEXEC
69919+ struct vm_area_struct *vma_m;
69920+#endif
69921+
69922 if (addr < vma->vm_start || addr >= vma->vm_end)
69923 return -EFAULT;
69924 if (!page_count(page))
69925 return -EINVAL;
69926+
69927+#ifdef CONFIG_PAX_SEGMEXEC
69928+ vma_m = pax_find_mirror_vma(vma);
69929+ if (vma_m)
69930+ vma_m->vm_flags |= VM_INSERTPAGE;
69931+#endif
69932+
69933 vma->vm_flags |= VM_INSERTPAGE;
69934 return insert_page(vma, addr, page, vma->vm_page_prot);
69935 }
69936@@ -2161,6 +2173,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
69937 unsigned long pfn)
69938 {
69939 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
69940+ BUG_ON(vma->vm_mirror);
69941
69942 if (addr < vma->vm_start || addr >= vma->vm_end)
69943 return -EFAULT;
69944@@ -2368,7 +2381,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
69945
69946 BUG_ON(pud_huge(*pud));
69947
69948- pmd = pmd_alloc(mm, pud, addr);
69949+ pmd = (mm == &init_mm) ?
69950+ pmd_alloc_kernel(mm, pud, addr) :
69951+ pmd_alloc(mm, pud, addr);
69952 if (!pmd)
69953 return -ENOMEM;
69954 do {
69955@@ -2388,7 +2403,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
69956 unsigned long next;
69957 int err;
69958
69959- pud = pud_alloc(mm, pgd, addr);
69960+ pud = (mm == &init_mm) ?
69961+ pud_alloc_kernel(mm, pgd, addr) :
69962+ pud_alloc(mm, pgd, addr);
69963 if (!pud)
69964 return -ENOMEM;
69965 do {
69966@@ -2476,6 +2493,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
69967 copy_user_highpage(dst, src, va, vma);
69968 }
69969
69970+#ifdef CONFIG_PAX_SEGMEXEC
69971+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
69972+{
69973+ struct mm_struct *mm = vma->vm_mm;
69974+ spinlock_t *ptl;
69975+ pte_t *pte, entry;
69976+
69977+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
69978+ entry = *pte;
69979+ if (!pte_present(entry)) {
69980+ if (!pte_none(entry)) {
69981+ BUG_ON(pte_file(entry));
69982+ free_swap_and_cache(pte_to_swp_entry(entry));
69983+ pte_clear_not_present_full(mm, address, pte, 0);
69984+ }
69985+ } else {
69986+ struct page *page;
69987+
69988+ flush_cache_page(vma, address, pte_pfn(entry));
69989+ entry = ptep_clear_flush(vma, address, pte);
69990+ BUG_ON(pte_dirty(entry));
69991+ page = vm_normal_page(vma, address, entry);
69992+ if (page) {
69993+ update_hiwater_rss(mm);
69994+ if (PageAnon(page))
69995+ dec_mm_counter_fast(mm, MM_ANONPAGES);
69996+ else
69997+ dec_mm_counter_fast(mm, MM_FILEPAGES);
69998+ page_remove_rmap(page);
69999+ page_cache_release(page);
70000+ }
70001+ }
70002+ pte_unmap_unlock(pte, ptl);
70003+}
70004+
70005+/* PaX: if vma is mirrored, synchronize the mirror's PTE
70006+ *
70007+ * the ptl of the lower mapped page is held on entry and is not released on exit
70008+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
70009+ */
70010+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70011+{
70012+ struct mm_struct *mm = vma->vm_mm;
70013+ unsigned long address_m;
70014+ spinlock_t *ptl_m;
70015+ struct vm_area_struct *vma_m;
70016+ pmd_t *pmd_m;
70017+ pte_t *pte_m, entry_m;
70018+
70019+ BUG_ON(!page_m || !PageAnon(page_m));
70020+
70021+ vma_m = pax_find_mirror_vma(vma);
70022+ if (!vma_m)
70023+ return;
70024+
70025+ BUG_ON(!PageLocked(page_m));
70026+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70027+ address_m = address + SEGMEXEC_TASK_SIZE;
70028+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70029+ pte_m = pte_offset_map(pmd_m, address_m);
70030+ ptl_m = pte_lockptr(mm, pmd_m);
70031+ if (ptl != ptl_m) {
70032+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70033+ if (!pte_none(*pte_m))
70034+ goto out;
70035+ }
70036+
70037+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70038+ page_cache_get(page_m);
70039+ page_add_anon_rmap(page_m, vma_m, address_m);
70040+ inc_mm_counter_fast(mm, MM_ANONPAGES);
70041+ set_pte_at(mm, address_m, pte_m, entry_m);
70042+ update_mmu_cache(vma_m, address_m, entry_m);
70043+out:
70044+ if (ptl != ptl_m)
70045+ spin_unlock(ptl_m);
70046+ pte_unmap(pte_m);
70047+ unlock_page(page_m);
70048+}
70049+
70050+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70051+{
70052+ struct mm_struct *mm = vma->vm_mm;
70053+ unsigned long address_m;
70054+ spinlock_t *ptl_m;
70055+ struct vm_area_struct *vma_m;
70056+ pmd_t *pmd_m;
70057+ pte_t *pte_m, entry_m;
70058+
70059+ BUG_ON(!page_m || PageAnon(page_m));
70060+
70061+ vma_m = pax_find_mirror_vma(vma);
70062+ if (!vma_m)
70063+ return;
70064+
70065+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70066+ address_m = address + SEGMEXEC_TASK_SIZE;
70067+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70068+ pte_m = pte_offset_map(pmd_m, address_m);
70069+ ptl_m = pte_lockptr(mm, pmd_m);
70070+ if (ptl != ptl_m) {
70071+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70072+ if (!pte_none(*pte_m))
70073+ goto out;
70074+ }
70075+
70076+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70077+ page_cache_get(page_m);
70078+ page_add_file_rmap(page_m);
70079+ inc_mm_counter_fast(mm, MM_FILEPAGES);
70080+ set_pte_at(mm, address_m, pte_m, entry_m);
70081+ update_mmu_cache(vma_m, address_m, entry_m);
70082+out:
70083+ if (ptl != ptl_m)
70084+ spin_unlock(ptl_m);
70085+ pte_unmap(pte_m);
70086+}
70087+
70088+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
70089+{
70090+ struct mm_struct *mm = vma->vm_mm;
70091+ unsigned long address_m;
70092+ spinlock_t *ptl_m;
70093+ struct vm_area_struct *vma_m;
70094+ pmd_t *pmd_m;
70095+ pte_t *pte_m, entry_m;
70096+
70097+ vma_m = pax_find_mirror_vma(vma);
70098+ if (!vma_m)
70099+ return;
70100+
70101+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70102+ address_m = address + SEGMEXEC_TASK_SIZE;
70103+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70104+ pte_m = pte_offset_map(pmd_m, address_m);
70105+ ptl_m = pte_lockptr(mm, pmd_m);
70106+ if (ptl != ptl_m) {
70107+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70108+ if (!pte_none(*pte_m))
70109+ goto out;
70110+ }
70111+
70112+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
70113+ set_pte_at(mm, address_m, pte_m, entry_m);
70114+out:
70115+ if (ptl != ptl_m)
70116+ spin_unlock(ptl_m);
70117+ pte_unmap(pte_m);
70118+}
70119+
70120+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
70121+{
70122+ struct page *page_m;
70123+ pte_t entry;
70124+
70125+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
70126+ goto out;
70127+
70128+ entry = *pte;
70129+ page_m = vm_normal_page(vma, address, entry);
70130+ if (!page_m)
70131+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
70132+ else if (PageAnon(page_m)) {
70133+ if (pax_find_mirror_vma(vma)) {
70134+ pte_unmap_unlock(pte, ptl);
70135+ lock_page(page_m);
70136+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
70137+ if (pte_same(entry, *pte))
70138+ pax_mirror_anon_pte(vma, address, page_m, ptl);
70139+ else
70140+ unlock_page(page_m);
70141+ }
70142+ } else
70143+ pax_mirror_file_pte(vma, address, page_m, ptl);
70144+
70145+out:
70146+ pte_unmap_unlock(pte, ptl);
70147+}
70148+#endif
70149+
70150 /*
70151 * This routine handles present pages, when users try to write
70152 * to a shared page. It is done by copying the page to a new address
70153@@ -2687,6 +2884,12 @@ gotten:
70154 */
70155 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70156 if (likely(pte_same(*page_table, orig_pte))) {
70157+
70158+#ifdef CONFIG_PAX_SEGMEXEC
70159+ if (pax_find_mirror_vma(vma))
70160+ BUG_ON(!trylock_page(new_page));
70161+#endif
70162+
70163 if (old_page) {
70164 if (!PageAnon(old_page)) {
70165 dec_mm_counter_fast(mm, MM_FILEPAGES);
70166@@ -2738,6 +2941,10 @@ gotten:
70167 page_remove_rmap(old_page);
70168 }
70169
70170+#ifdef CONFIG_PAX_SEGMEXEC
70171+ pax_mirror_anon_pte(vma, address, new_page, ptl);
70172+#endif
70173+
70174 /* Free the old page.. */
70175 new_page = old_page;
70176 ret |= VM_FAULT_WRITE;
70177@@ -3017,6 +3224,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70178 swap_free(entry);
70179 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
70180 try_to_free_swap(page);
70181+
70182+#ifdef CONFIG_PAX_SEGMEXEC
70183+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
70184+#endif
70185+
70186 unlock_page(page);
70187 if (swapcache) {
70188 /*
70189@@ -3040,6 +3252,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70190
70191 /* No need to invalidate - it was non-present before */
70192 update_mmu_cache(vma, address, page_table);
70193+
70194+#ifdef CONFIG_PAX_SEGMEXEC
70195+ pax_mirror_anon_pte(vma, address, page, ptl);
70196+#endif
70197+
70198 unlock:
70199 pte_unmap_unlock(page_table, ptl);
70200 out:
70201@@ -3059,40 +3276,6 @@ out_release:
70202 }
70203
70204 /*
70205- * This is like a special single-page "expand_{down|up}wards()",
70206- * except we must first make sure that 'address{-|+}PAGE_SIZE'
70207- * doesn't hit another vma.
70208- */
70209-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
70210-{
70211- address &= PAGE_MASK;
70212- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
70213- struct vm_area_struct *prev = vma->vm_prev;
70214-
70215- /*
70216- * Is there a mapping abutting this one below?
70217- *
70218- * That's only ok if it's the same stack mapping
70219- * that has gotten split..
70220- */
70221- if (prev && prev->vm_end == address)
70222- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
70223-
70224- expand_downwards(vma, address - PAGE_SIZE);
70225- }
70226- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
70227- struct vm_area_struct *next = vma->vm_next;
70228-
70229- /* As VM_GROWSDOWN but s/below/above/ */
70230- if (next && next->vm_start == address + PAGE_SIZE)
70231- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
70232-
70233- expand_upwards(vma, address + PAGE_SIZE);
70234- }
70235- return 0;
70236-}
70237-
70238-/*
70239 * We enter with non-exclusive mmap_sem (to exclude vma changes,
70240 * but allow concurrent faults), and pte mapped but not yet locked.
70241 * We return with mmap_sem still held, but pte unmapped and unlocked.
70242@@ -3101,27 +3284,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70243 unsigned long address, pte_t *page_table, pmd_t *pmd,
70244 unsigned int flags)
70245 {
70246- struct page *page;
70247+ struct page *page = NULL;
70248 spinlock_t *ptl;
70249 pte_t entry;
70250
70251- pte_unmap(page_table);
70252-
70253- /* Check if we need to add a guard page to the stack */
70254- if (check_stack_guard_page(vma, address) < 0)
70255- return VM_FAULT_SIGBUS;
70256-
70257- /* Use the zero-page for reads */
70258 if (!(flags & FAULT_FLAG_WRITE)) {
70259 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
70260 vma->vm_page_prot));
70261- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70262+ ptl = pte_lockptr(mm, pmd);
70263+ spin_lock(ptl);
70264 if (!pte_none(*page_table))
70265 goto unlock;
70266 goto setpte;
70267 }
70268
70269 /* Allocate our own private page. */
70270+ pte_unmap(page_table);
70271+
70272 if (unlikely(anon_vma_prepare(vma)))
70273 goto oom;
70274 page = alloc_zeroed_user_highpage_movable(vma, address);
70275@@ -3140,6 +3319,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70276 if (!pte_none(*page_table))
70277 goto release;
70278
70279+#ifdef CONFIG_PAX_SEGMEXEC
70280+ if (pax_find_mirror_vma(vma))
70281+ BUG_ON(!trylock_page(page));
70282+#endif
70283+
70284 inc_mm_counter_fast(mm, MM_ANONPAGES);
70285 page_add_new_anon_rmap(page, vma, address);
70286 setpte:
70287@@ -3147,6 +3331,12 @@ setpte:
70288
70289 /* No need to invalidate - it was non-present before */
70290 update_mmu_cache(vma, address, page_table);
70291+
70292+#ifdef CONFIG_PAX_SEGMEXEC
70293+ if (page)
70294+ pax_mirror_anon_pte(vma, address, page, ptl);
70295+#endif
70296+
70297 unlock:
70298 pte_unmap_unlock(page_table, ptl);
70299 return 0;
70300@@ -3290,6 +3480,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70301 */
70302 /* Only go through if we didn't race with anybody else... */
70303 if (likely(pte_same(*page_table, orig_pte))) {
70304+
70305+#ifdef CONFIG_PAX_SEGMEXEC
70306+ if (anon && pax_find_mirror_vma(vma))
70307+ BUG_ON(!trylock_page(page));
70308+#endif
70309+
70310 flush_icache_page(vma, page);
70311 entry = mk_pte(page, vma->vm_page_prot);
70312 if (flags & FAULT_FLAG_WRITE)
70313@@ -3309,6 +3505,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70314
70315 /* no need to invalidate: a not-present page won't be cached */
70316 update_mmu_cache(vma, address, page_table);
70317+
70318+#ifdef CONFIG_PAX_SEGMEXEC
70319+ if (anon)
70320+ pax_mirror_anon_pte(vma, address, page, ptl);
70321+ else
70322+ pax_mirror_file_pte(vma, address, page, ptl);
70323+#endif
70324+
70325 } else {
70326 if (cow_page)
70327 mem_cgroup_uncharge_page(cow_page);
70328@@ -3462,6 +3666,12 @@ int handle_pte_fault(struct mm_struct *mm,
70329 if (flags & FAULT_FLAG_WRITE)
70330 flush_tlb_fix_spurious_fault(vma, address);
70331 }
70332+
70333+#ifdef CONFIG_PAX_SEGMEXEC
70334+ pax_mirror_pte(vma, address, pte, pmd, ptl);
70335+ return 0;
70336+#endif
70337+
70338 unlock:
70339 pte_unmap_unlock(pte, ptl);
70340 return 0;
70341@@ -3478,6 +3688,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70342 pmd_t *pmd;
70343 pte_t *pte;
70344
70345+#ifdef CONFIG_PAX_SEGMEXEC
70346+ struct vm_area_struct *vma_m;
70347+#endif
70348+
70349 __set_current_state(TASK_RUNNING);
70350
70351 count_vm_event(PGFAULT);
70352@@ -3489,6 +3703,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70353 if (unlikely(is_vm_hugetlb_page(vma)))
70354 return hugetlb_fault(mm, vma, address, flags);
70355
70356+#ifdef CONFIG_PAX_SEGMEXEC
70357+ vma_m = pax_find_mirror_vma(vma);
70358+ if (vma_m) {
70359+ unsigned long address_m;
70360+ pgd_t *pgd_m;
70361+ pud_t *pud_m;
70362+ pmd_t *pmd_m;
70363+
70364+ if (vma->vm_start > vma_m->vm_start) {
70365+ address_m = address;
70366+ address -= SEGMEXEC_TASK_SIZE;
70367+ vma = vma_m;
70368+ } else
70369+ address_m = address + SEGMEXEC_TASK_SIZE;
70370+
70371+ pgd_m = pgd_offset(mm, address_m);
70372+ pud_m = pud_alloc(mm, pgd_m, address_m);
70373+ if (!pud_m)
70374+ return VM_FAULT_OOM;
70375+ pmd_m = pmd_alloc(mm, pud_m, address_m);
70376+ if (!pmd_m)
70377+ return VM_FAULT_OOM;
70378+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
70379+ return VM_FAULT_OOM;
70380+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
70381+ }
70382+#endif
70383+
70384 pgd = pgd_offset(mm, address);
70385 pud = pud_alloc(mm, pgd, address);
70386 if (!pud)
70387@@ -3518,7 +3760,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70388 * run pte_offset_map on the pmd, if an huge pmd could
70389 * materialize from under us from a different thread.
70390 */
70391- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
70392+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
70393 return VM_FAULT_OOM;
70394 /* if an huge pmd materialized from under us just retry later */
70395 if (unlikely(pmd_trans_huge(*pmd)))
70396@@ -3555,6 +3797,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
70397 spin_unlock(&mm->page_table_lock);
70398 return 0;
70399 }
70400+
70401+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
70402+{
70403+ pud_t *new = pud_alloc_one(mm, address);
70404+ if (!new)
70405+ return -ENOMEM;
70406+
70407+ smp_wmb(); /* See comment in __pte_alloc */
70408+
70409+ spin_lock(&mm->page_table_lock);
70410+ if (pgd_present(*pgd)) /* Another has populated it */
70411+ pud_free(mm, new);
70412+ else
70413+ pgd_populate_kernel(mm, pgd, new);
70414+ spin_unlock(&mm->page_table_lock);
70415+ return 0;
70416+}
70417 #endif /* __PAGETABLE_PUD_FOLDED */
70418
70419 #ifndef __PAGETABLE_PMD_FOLDED
70420@@ -3585,6 +3844,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
70421 spin_unlock(&mm->page_table_lock);
70422 return 0;
70423 }
70424+
70425+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
70426+{
70427+ pmd_t *new = pmd_alloc_one(mm, address);
70428+ if (!new)
70429+ return -ENOMEM;
70430+
70431+ smp_wmb(); /* See comment in __pte_alloc */
70432+
70433+ spin_lock(&mm->page_table_lock);
70434+#ifndef __ARCH_HAS_4LEVEL_HACK
70435+ if (pud_present(*pud)) /* Another has populated it */
70436+ pmd_free(mm, new);
70437+ else
70438+ pud_populate_kernel(mm, pud, new);
70439+#else
70440+ if (pgd_present(*pud)) /* Another has populated it */
70441+ pmd_free(mm, new);
70442+ else
70443+ pgd_populate_kernel(mm, pud, new);
70444+#endif /* __ARCH_HAS_4LEVEL_HACK */
70445+ spin_unlock(&mm->page_table_lock);
70446+ return 0;
70447+}
70448 #endif /* __PAGETABLE_PMD_FOLDED */
70449
70450 int make_pages_present(unsigned long addr, unsigned long end)
70451@@ -3622,7 +3905,7 @@ static int __init gate_vma_init(void)
70452 gate_vma.vm_start = FIXADDR_USER_START;
70453 gate_vma.vm_end = FIXADDR_USER_END;
70454 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
70455- gate_vma.vm_page_prot = __P101;
70456+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
70457
70458 return 0;
70459 }
70460diff --git a/mm/mempolicy.c b/mm/mempolicy.c
70461index bf5b485..e44c2cb 100644
70462--- a/mm/mempolicy.c
70463+++ b/mm/mempolicy.c
70464@@ -619,6 +619,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
70465 unsigned long vmstart;
70466 unsigned long vmend;
70467
70468+#ifdef CONFIG_PAX_SEGMEXEC
70469+ struct vm_area_struct *vma_m;
70470+#endif
70471+
70472 vma = find_vma(mm, start);
70473 if (!vma || vma->vm_start > start)
70474 return -EFAULT;
70475@@ -672,6 +676,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
70476 if (err)
70477 goto out;
70478 }
70479+
70480+#ifdef CONFIG_PAX_SEGMEXEC
70481+ vma_m = pax_find_mirror_vma(vma);
70482+ if (vma_m && vma_m->vm_ops && vma_m->vm_ops->set_policy) {
70483+ err = vma_m->vm_ops->set_policy(vma_m, new_pol);
70484+ if (err)
70485+ goto out;
70486+ }
70487+#endif
70488+
70489 }
70490
70491 out:
70492@@ -1105,6 +1119,17 @@ static long do_mbind(unsigned long start, unsigned long len,
70493
70494 if (end < start)
70495 return -EINVAL;
70496+
70497+#ifdef CONFIG_PAX_SEGMEXEC
70498+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
70499+ if (end > SEGMEXEC_TASK_SIZE)
70500+ return -EINVAL;
70501+ } else
70502+#endif
70503+
70504+ if (end > TASK_SIZE)
70505+ return -EINVAL;
70506+
70507 if (end == start)
70508 return 0;
70509
70510@@ -1328,8 +1353,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70511 */
70512 tcred = __task_cred(task);
70513 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
70514- cred->uid != tcred->suid && cred->uid != tcred->uid &&
70515- !capable(CAP_SYS_NICE)) {
70516+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
70517 rcu_read_unlock();
70518 err = -EPERM;
70519 goto out_put;
70520@@ -1360,6 +1384,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70521 goto out;
70522 }
70523
70524+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70525+ if (mm != current->mm &&
70526+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
70527+ mmput(mm);
70528+ err = -EPERM;
70529+ goto out;
70530+ }
70531+#endif
70532+
70533 err = do_migrate_pages(mm, old, new,
70534 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
70535
70536diff --git a/mm/mlock.c b/mm/mlock.c
70537index ef726e8..13e0901 100644
70538--- a/mm/mlock.c
70539+++ b/mm/mlock.c
70540@@ -13,6 +13,7 @@
70541 #include <linux/pagemap.h>
70542 #include <linux/mempolicy.h>
70543 #include <linux/syscalls.h>
70544+#include <linux/security.h>
70545 #include <linux/sched.h>
70546 #include <linux/export.h>
70547 #include <linux/rmap.h>
70548@@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
70549 return -EINVAL;
70550 if (end == start)
70551 return 0;
70552+ if (end > TASK_SIZE)
70553+ return -EINVAL;
70554+
70555 vma = find_vma(current->mm, start);
70556 if (!vma || vma->vm_start > start)
70557 return -ENOMEM;
70558@@ -396,6 +400,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
70559 for (nstart = start ; ; ) {
70560 vm_flags_t newflags;
70561
70562+#ifdef CONFIG_PAX_SEGMEXEC
70563+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70564+ break;
70565+#endif
70566+
70567 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
70568
70569 newflags = vma->vm_flags | VM_LOCKED;
70570@@ -501,6 +510,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
70571 lock_limit >>= PAGE_SHIFT;
70572
70573 /* check against resource limits */
70574+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
70575 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
70576 error = do_mlock(start, len, 1);
70577 up_write(&current->mm->mmap_sem);
70578@@ -524,17 +534,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
70579 static int do_mlockall(int flags)
70580 {
70581 struct vm_area_struct * vma, * prev = NULL;
70582- unsigned int def_flags = 0;
70583
70584 if (flags & MCL_FUTURE)
70585- def_flags = VM_LOCKED;
70586- current->mm->def_flags = def_flags;
70587+ current->mm->def_flags |= VM_LOCKED;
70588+ else
70589+ current->mm->def_flags &= ~VM_LOCKED;
70590 if (flags == MCL_FUTURE)
70591 goto out;
70592
70593 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
70594 vm_flags_t newflags;
70595
70596+#ifdef CONFIG_PAX_SEGMEXEC
70597+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70598+ break;
70599+#endif
70600+
70601+ BUG_ON(vma->vm_end > TASK_SIZE);
70602 newflags = vma->vm_flags | VM_LOCKED;
70603 if (!(flags & MCL_CURRENT))
70604 newflags &= ~VM_LOCKED;
70605@@ -567,6 +583,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
70606 lock_limit >>= PAGE_SHIFT;
70607
70608 ret = -ENOMEM;
70609+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
70610 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
70611 capable(CAP_IPC_LOCK))
70612 ret = do_mlockall(flags);
70613diff --git a/mm/mmap.c b/mm/mmap.c
70614index 848ef52..d2b586c 100644
70615--- a/mm/mmap.c
70616+++ b/mm/mmap.c
70617@@ -46,6 +46,16 @@
70618 #define arch_rebalance_pgtables(addr, len) (addr)
70619 #endif
70620
70621+static inline void verify_mm_writelocked(struct mm_struct *mm)
70622+{
70623+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
70624+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
70625+ up_read(&mm->mmap_sem);
70626+ BUG();
70627+ }
70628+#endif
70629+}
70630+
70631 static void unmap_region(struct mm_struct *mm,
70632 struct vm_area_struct *vma, struct vm_area_struct *prev,
70633 unsigned long start, unsigned long end);
70634@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
70635 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
70636 *
70637 */
70638-pgprot_t protection_map[16] = {
70639+pgprot_t protection_map[16] __read_only = {
70640 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
70641 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
70642 };
70643
70644-pgprot_t vm_get_page_prot(unsigned long vm_flags)
70645+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
70646 {
70647- return __pgprot(pgprot_val(protection_map[vm_flags &
70648+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
70649 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
70650 pgprot_val(arch_vm_get_page_prot(vm_flags)));
70651+
70652+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70653+ if (!(__supported_pte_mask & _PAGE_NX) &&
70654+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
70655+ (vm_flags & (VM_READ | VM_WRITE)))
70656+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
70657+#endif
70658+
70659+ return prot;
70660 }
70661 EXPORT_SYMBOL(vm_get_page_prot);
70662
70663 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
70664 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
70665 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
70666+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
70667 /*
70668 * Make sure vm_committed_as in one cacheline and not cacheline shared with
70669 * other variables. It can be updated by several CPUs frequently.
70670@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
70671 struct vm_area_struct *next = vma->vm_next;
70672
70673 might_sleep();
70674+ BUG_ON(vma->vm_mirror);
70675 if (vma->vm_ops && vma->vm_ops->close)
70676 vma->vm_ops->close(vma);
70677 if (vma->vm_file) {
70678@@ -274,6 +295,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
70679 * not page aligned -Ram Gupta
70680 */
70681 rlim = rlimit(RLIMIT_DATA);
70682+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
70683 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
70684 (mm->end_data - mm->start_data) > rlim)
70685 goto out;
70686@@ -690,6 +712,12 @@ static int
70687 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
70688 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
70689 {
70690+
70691+#ifdef CONFIG_PAX_SEGMEXEC
70692+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
70693+ return 0;
70694+#endif
70695+
70696 if (is_mergeable_vma(vma, file, vm_flags) &&
70697 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
70698 if (vma->vm_pgoff == vm_pgoff)
70699@@ -709,6 +737,12 @@ static int
70700 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
70701 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
70702 {
70703+
70704+#ifdef CONFIG_PAX_SEGMEXEC
70705+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
70706+ return 0;
70707+#endif
70708+
70709 if (is_mergeable_vma(vma, file, vm_flags) &&
70710 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
70711 pgoff_t vm_pglen;
70712@@ -751,13 +785,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
70713 struct vm_area_struct *vma_merge(struct mm_struct *mm,
70714 struct vm_area_struct *prev, unsigned long addr,
70715 unsigned long end, unsigned long vm_flags,
70716- struct anon_vma *anon_vma, struct file *file,
70717+ struct anon_vma *anon_vma, struct file *file,
70718 pgoff_t pgoff, struct mempolicy *policy)
70719 {
70720 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
70721 struct vm_area_struct *area, *next;
70722 int err;
70723
70724+#ifdef CONFIG_PAX_SEGMEXEC
70725+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
70726+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
70727+
70728+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
70729+#endif
70730+
70731 /*
70732 * We later require that vma->vm_flags == vm_flags,
70733 * so this tests vma->vm_flags & VM_SPECIAL, too.
70734@@ -773,6 +814,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70735 if (next && next->vm_end == end) /* cases 6, 7, 8 */
70736 next = next->vm_next;
70737
70738+#ifdef CONFIG_PAX_SEGMEXEC
70739+ if (prev)
70740+ prev_m = pax_find_mirror_vma(prev);
70741+ if (area)
70742+ area_m = pax_find_mirror_vma(area);
70743+ if (next)
70744+ next_m = pax_find_mirror_vma(next);
70745+#endif
70746+
70747 /*
70748 * Can it merge with the predecessor?
70749 */
70750@@ -792,9 +842,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70751 /* cases 1, 6 */
70752 err = vma_adjust(prev, prev->vm_start,
70753 next->vm_end, prev->vm_pgoff, NULL);
70754- } else /* cases 2, 5, 7 */
70755+
70756+#ifdef CONFIG_PAX_SEGMEXEC
70757+ if (!err && prev_m)
70758+ err = vma_adjust(prev_m, prev_m->vm_start,
70759+ next_m->vm_end, prev_m->vm_pgoff, NULL);
70760+#endif
70761+
70762+ } else { /* cases 2, 5, 7 */
70763 err = vma_adjust(prev, prev->vm_start,
70764 end, prev->vm_pgoff, NULL);
70765+
70766+#ifdef CONFIG_PAX_SEGMEXEC
70767+ if (!err && prev_m)
70768+ err = vma_adjust(prev_m, prev_m->vm_start,
70769+ end_m, prev_m->vm_pgoff, NULL);
70770+#endif
70771+
70772+ }
70773 if (err)
70774 return NULL;
70775 khugepaged_enter_vma_merge(prev);
70776@@ -808,12 +873,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70777 mpol_equal(policy, vma_policy(next)) &&
70778 can_vma_merge_before(next, vm_flags,
70779 anon_vma, file, pgoff+pglen)) {
70780- if (prev && addr < prev->vm_end) /* case 4 */
70781+ if (prev && addr < prev->vm_end) { /* case 4 */
70782 err = vma_adjust(prev, prev->vm_start,
70783 addr, prev->vm_pgoff, NULL);
70784- else /* cases 3, 8 */
70785+
70786+#ifdef CONFIG_PAX_SEGMEXEC
70787+ if (!err && prev_m)
70788+ err = vma_adjust(prev_m, prev_m->vm_start,
70789+ addr_m, prev_m->vm_pgoff, NULL);
70790+#endif
70791+
70792+ } else { /* cases 3, 8 */
70793 err = vma_adjust(area, addr, next->vm_end,
70794 next->vm_pgoff - pglen, NULL);
70795+
70796+#ifdef CONFIG_PAX_SEGMEXEC
70797+ if (!err && area_m)
70798+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
70799+ next_m->vm_pgoff - pglen, NULL);
70800+#endif
70801+
70802+ }
70803 if (err)
70804 return NULL;
70805 khugepaged_enter_vma_merge(area);
70806@@ -922,14 +1002,11 @@ none:
70807 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
70808 struct file *file, long pages)
70809 {
70810- const unsigned long stack_flags
70811- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
70812-
70813 if (file) {
70814 mm->shared_vm += pages;
70815 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
70816 mm->exec_vm += pages;
70817- } else if (flags & stack_flags)
70818+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
70819 mm->stack_vm += pages;
70820 if (flags & (VM_RESERVED|VM_IO))
70821 mm->reserved_vm += pages;
70822@@ -969,7 +1046,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70823 * (the exception is when the underlying filesystem is noexec
70824 * mounted, in which case we dont add PROT_EXEC.)
70825 */
70826- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70827+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70828 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
70829 prot |= PROT_EXEC;
70830
70831@@ -995,7 +1072,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70832 /* Obtain the address to map to. we verify (or select) it and ensure
70833 * that it represents a valid section of the address space.
70834 */
70835- addr = get_unmapped_area(file, addr, len, pgoff, flags);
70836+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
70837 if (addr & ~PAGE_MASK)
70838 return addr;
70839
70840@@ -1006,6 +1083,36 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70841 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
70842 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
70843
70844+#ifdef CONFIG_PAX_MPROTECT
70845+ if (mm->pax_flags & MF_PAX_MPROTECT) {
70846+#ifndef CONFIG_PAX_MPROTECT_COMPAT
70847+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
70848+ gr_log_rwxmmap(file);
70849+
70850+#ifdef CONFIG_PAX_EMUPLT
70851+ vm_flags &= ~VM_EXEC;
70852+#else
70853+ return -EPERM;
70854+#endif
70855+
70856+ }
70857+
70858+ if (!(vm_flags & VM_EXEC))
70859+ vm_flags &= ~VM_MAYEXEC;
70860+#else
70861+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
70862+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
70863+#endif
70864+ else
70865+ vm_flags &= ~VM_MAYWRITE;
70866+ }
70867+#endif
70868+
70869+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70870+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
70871+ vm_flags &= ~VM_PAGEEXEC;
70872+#endif
70873+
70874 if (flags & MAP_LOCKED)
70875 if (!can_do_mlock())
70876 return -EPERM;
70877@@ -1017,6 +1124,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70878 locked += mm->locked_vm;
70879 lock_limit = rlimit(RLIMIT_MEMLOCK);
70880 lock_limit >>= PAGE_SHIFT;
70881+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
70882 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
70883 return -EAGAIN;
70884 }
70885@@ -1087,6 +1195,9 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70886 if (error)
70887 return error;
70888
70889+ if (!gr_acl_handle_mmap(file, prot))
70890+ return -EACCES;
70891+
70892 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
70893 }
70894
70895@@ -1192,7 +1303,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
70896 vm_flags_t vm_flags = vma->vm_flags;
70897
70898 /* If it was private or non-writable, the write bit is already clear */
70899- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
70900+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
70901 return 0;
70902
70903 /* The backer wishes to know when pages are first written to? */
70904@@ -1241,14 +1352,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
70905 unsigned long charged = 0;
70906 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
70907
70908+#ifdef CONFIG_PAX_SEGMEXEC
70909+ struct vm_area_struct *vma_m = NULL;
70910+#endif
70911+
70912+ /*
70913+ * mm->mmap_sem is required to protect against another thread
70914+ * changing the mappings in case we sleep.
70915+ */
70916+ verify_mm_writelocked(mm);
70917+
70918 /* Clear old maps */
70919 error = -ENOMEM;
70920-munmap_back:
70921 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70922 if (vma && vma->vm_start < addr + len) {
70923 if (do_munmap(mm, addr, len))
70924 return -ENOMEM;
70925- goto munmap_back;
70926+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70927+ BUG_ON(vma && vma->vm_start < addr + len);
70928 }
70929
70930 /* Check against address space limit. */
70931@@ -1297,6 +1418,16 @@ munmap_back:
70932 goto unacct_error;
70933 }
70934
70935+#ifdef CONFIG_PAX_SEGMEXEC
70936+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
70937+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70938+ if (!vma_m) {
70939+ error = -ENOMEM;
70940+ goto free_vma;
70941+ }
70942+ }
70943+#endif
70944+
70945 vma->vm_mm = mm;
70946 vma->vm_start = addr;
70947 vma->vm_end = addr + len;
70948@@ -1321,6 +1452,19 @@ munmap_back:
70949 error = file->f_op->mmap(file, vma);
70950 if (error)
70951 goto unmap_and_free_vma;
70952+
70953+#ifdef CONFIG_PAX_SEGMEXEC
70954+ if (vma_m && (vm_flags & VM_EXECUTABLE))
70955+ added_exe_file_vma(mm);
70956+#endif
70957+
70958+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70959+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
70960+ vma->vm_flags |= VM_PAGEEXEC;
70961+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
70962+ }
70963+#endif
70964+
70965 if (vm_flags & VM_EXECUTABLE)
70966 added_exe_file_vma(mm);
70967
70968@@ -1358,6 +1502,11 @@ munmap_back:
70969 vma_link(mm, vma, prev, rb_link, rb_parent);
70970 file = vma->vm_file;
70971
70972+#ifdef CONFIG_PAX_SEGMEXEC
70973+ if (vma_m)
70974+ BUG_ON(pax_mirror_vma(vma_m, vma));
70975+#endif
70976+
70977 /* Once vma denies write, undo our temporary denial count */
70978 if (correct_wcount)
70979 atomic_inc(&inode->i_writecount);
70980@@ -1366,6 +1515,7 @@ out:
70981
70982 mm->total_vm += len >> PAGE_SHIFT;
70983 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
70984+ track_exec_limit(mm, addr, addr + len, vm_flags);
70985 if (vm_flags & VM_LOCKED) {
70986 if (!mlock_vma_pages_range(vma, addr, addr + len))
70987 mm->locked_vm += (len >> PAGE_SHIFT);
70988@@ -1383,6 +1533,12 @@ unmap_and_free_vma:
70989 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
70990 charged = 0;
70991 free_vma:
70992+
70993+#ifdef CONFIG_PAX_SEGMEXEC
70994+ if (vma_m)
70995+ kmem_cache_free(vm_area_cachep, vma_m);
70996+#endif
70997+
70998 kmem_cache_free(vm_area_cachep, vma);
70999 unacct_error:
71000 if (charged)
71001@@ -1390,6 +1546,44 @@ unacct_error:
71002 return error;
71003 }
71004
71005+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
71006+{
71007+ if (!vma) {
71008+#ifdef CONFIG_STACK_GROWSUP
71009+ if (addr > sysctl_heap_stack_gap)
71010+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
71011+ else
71012+ vma = find_vma(current->mm, 0);
71013+ if (vma && (vma->vm_flags & VM_GROWSUP))
71014+ return false;
71015+#endif
71016+ return true;
71017+ }
71018+
71019+ if (addr + len > vma->vm_start)
71020+ return false;
71021+
71022+ if (vma->vm_flags & VM_GROWSDOWN)
71023+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
71024+#ifdef CONFIG_STACK_GROWSUP
71025+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
71026+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
71027+#endif
71028+
71029+ return true;
71030+}
71031+
71032+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
71033+{
71034+ if (vma->vm_start < len)
71035+ return -ENOMEM;
71036+ if (!(vma->vm_flags & VM_GROWSDOWN))
71037+ return vma->vm_start - len;
71038+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
71039+ return vma->vm_start - len - sysctl_heap_stack_gap;
71040+ return -ENOMEM;
71041+}
71042+
71043 /* Get an address range which is currently unmapped.
71044 * For shmat() with addr=0.
71045 *
71046@@ -1416,18 +1610,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
71047 if (flags & MAP_FIXED)
71048 return addr;
71049
71050+#ifdef CONFIG_PAX_RANDMMAP
71051+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71052+#endif
71053+
71054 if (addr) {
71055 addr = PAGE_ALIGN(addr);
71056- vma = find_vma(mm, addr);
71057- if (TASK_SIZE - len >= addr &&
71058- (!vma || addr + len <= vma->vm_start))
71059- return addr;
71060+ if (TASK_SIZE - len >= addr) {
71061+ vma = find_vma(mm, addr);
71062+ if (check_heap_stack_gap(vma, addr, len))
71063+ return addr;
71064+ }
71065 }
71066 if (len > mm->cached_hole_size) {
71067- start_addr = addr = mm->free_area_cache;
71068+ start_addr = addr = mm->free_area_cache;
71069 } else {
71070- start_addr = addr = TASK_UNMAPPED_BASE;
71071- mm->cached_hole_size = 0;
71072+ start_addr = addr = mm->mmap_base;
71073+ mm->cached_hole_size = 0;
71074 }
71075
71076 full_search:
71077@@ -1438,34 +1637,40 @@ full_search:
71078 * Start a new search - just in case we missed
71079 * some holes.
71080 */
71081- if (start_addr != TASK_UNMAPPED_BASE) {
71082- addr = TASK_UNMAPPED_BASE;
71083- start_addr = addr;
71084+ if (start_addr != mm->mmap_base) {
71085+ start_addr = addr = mm->mmap_base;
71086 mm->cached_hole_size = 0;
71087 goto full_search;
71088 }
71089 return -ENOMEM;
71090 }
71091- if (!vma || addr + len <= vma->vm_start) {
71092- /*
71093- * Remember the place where we stopped the search:
71094- */
71095- mm->free_area_cache = addr + len;
71096- return addr;
71097- }
71098+ if (check_heap_stack_gap(vma, addr, len))
71099+ break;
71100 if (addr + mm->cached_hole_size < vma->vm_start)
71101 mm->cached_hole_size = vma->vm_start - addr;
71102 addr = vma->vm_end;
71103 }
71104+
71105+ /*
71106+ * Remember the place where we stopped the search:
71107+ */
71108+ mm->free_area_cache = addr + len;
71109+ return addr;
71110 }
71111 #endif
71112
71113 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
71114 {
71115+
71116+#ifdef CONFIG_PAX_SEGMEXEC
71117+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71118+ return;
71119+#endif
71120+
71121 /*
71122 * Is this a new hole at the lowest possible address?
71123 */
71124- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
71125+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
71126 mm->free_area_cache = addr;
71127 }
71128
71129@@ -1481,7 +1686,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71130 {
71131 struct vm_area_struct *vma;
71132 struct mm_struct *mm = current->mm;
71133- unsigned long addr = addr0, start_addr;
71134+ unsigned long base = mm->mmap_base, addr = addr0, start_addr;
71135
71136 /* requested length too big for entire address space */
71137 if (len > TASK_SIZE)
71138@@ -1490,13 +1695,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71139 if (flags & MAP_FIXED)
71140 return addr;
71141
71142+#ifdef CONFIG_PAX_RANDMMAP
71143+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71144+#endif
71145+
71146 /* requesting a specific address */
71147 if (addr) {
71148 addr = PAGE_ALIGN(addr);
71149- vma = find_vma(mm, addr);
71150- if (TASK_SIZE - len >= addr &&
71151- (!vma || addr + len <= vma->vm_start))
71152- return addr;
71153+ if (TASK_SIZE - len >= addr) {
71154+ vma = find_vma(mm, addr);
71155+ if (check_heap_stack_gap(vma, addr, len))
71156+ return addr;
71157+ }
71158 }
71159
71160 /* check if free_area_cache is useful for us */
71161@@ -1520,7 +1730,7 @@ try_again:
71162 * return with success:
71163 */
71164 vma = find_vma(mm, addr);
71165- if (!vma || addr+len <= vma->vm_start)
71166+ if (check_heap_stack_gap(vma, addr, len))
71167 /* remember the address as a hint for next time */
71168 return (mm->free_area_cache = addr);
71169
71170@@ -1529,8 +1739,8 @@ try_again:
71171 mm->cached_hole_size = vma->vm_start - addr;
71172
71173 /* try just below the current vma->vm_start */
71174- addr = vma->vm_start-len;
71175- } while (len < vma->vm_start);
71176+ addr = skip_heap_stack_gap(vma, len);
71177+ } while (!IS_ERR_VALUE(addr));
71178
71179 fail:
71180 /*
71181@@ -1553,13 +1763,21 @@ fail:
71182 * can happen with large stack limits and large mmap()
71183 * allocations.
71184 */
71185+ mm->mmap_base = TASK_UNMAPPED_BASE;
71186+
71187+#ifdef CONFIG_PAX_RANDMMAP
71188+ if (mm->pax_flags & MF_PAX_RANDMMAP)
71189+ mm->mmap_base += mm->delta_mmap;
71190+#endif
71191+
71192+ mm->free_area_cache = mm->mmap_base;
71193 mm->cached_hole_size = ~0UL;
71194- mm->free_area_cache = TASK_UNMAPPED_BASE;
71195 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
71196 /*
71197 * Restore the topdown base:
71198 */
71199- mm->free_area_cache = mm->mmap_base;
71200+ mm->mmap_base = base;
71201+ mm->free_area_cache = base;
71202 mm->cached_hole_size = ~0UL;
71203
71204 return addr;
71205@@ -1568,6 +1786,12 @@ fail:
71206
71207 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71208 {
71209+
71210+#ifdef CONFIG_PAX_SEGMEXEC
71211+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71212+ return;
71213+#endif
71214+
71215 /*
71216 * Is this a new hole at the highest possible address?
71217 */
71218@@ -1575,8 +1799,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71219 mm->free_area_cache = addr;
71220
71221 /* dont allow allocations above current base */
71222- if (mm->free_area_cache > mm->mmap_base)
71223+ if (mm->free_area_cache > mm->mmap_base) {
71224 mm->free_area_cache = mm->mmap_base;
71225+ mm->cached_hole_size = ~0UL;
71226+ }
71227 }
71228
71229 unsigned long
71230@@ -1672,6 +1898,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
71231 return vma;
71232 }
71233
71234+#ifdef CONFIG_PAX_SEGMEXEC
71235+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
71236+{
71237+ struct vm_area_struct *vma_m;
71238+
71239+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
71240+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
71241+ BUG_ON(vma->vm_mirror);
71242+ return NULL;
71243+ }
71244+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
71245+ vma_m = vma->vm_mirror;
71246+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
71247+ BUG_ON(vma->vm_file != vma_m->vm_file);
71248+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
71249+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
71250+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
71251+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
71252+ return vma_m;
71253+}
71254+#endif
71255+
71256 /*
71257 * Verify that the stack growth is acceptable and
71258 * update accounting. This is shared with both the
71259@@ -1688,6 +1936,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71260 return -ENOMEM;
71261
71262 /* Stack limit test */
71263+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
71264 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
71265 return -ENOMEM;
71266
71267@@ -1698,6 +1947,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71268 locked = mm->locked_vm + grow;
71269 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
71270 limit >>= PAGE_SHIFT;
71271+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71272 if (locked > limit && !capable(CAP_IPC_LOCK))
71273 return -ENOMEM;
71274 }
71275@@ -1728,37 +1978,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71276 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
71277 * vma is the last one with address > vma->vm_end. Have to extend vma.
71278 */
71279+#ifndef CONFIG_IA64
71280+static
71281+#endif
71282 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71283 {
71284 int error;
71285+ bool locknext;
71286
71287 if (!(vma->vm_flags & VM_GROWSUP))
71288 return -EFAULT;
71289
71290+ /* Also guard against wrapping around to address 0. */
71291+ if (address < PAGE_ALIGN(address+1))
71292+ address = PAGE_ALIGN(address+1);
71293+ else
71294+ return -ENOMEM;
71295+
71296 /*
71297 * We must make sure the anon_vma is allocated
71298 * so that the anon_vma locking is not a noop.
71299 */
71300 if (unlikely(anon_vma_prepare(vma)))
71301 return -ENOMEM;
71302+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
71303+ if (locknext && anon_vma_prepare(vma->vm_next))
71304+ return -ENOMEM;
71305 vma_lock_anon_vma(vma);
71306+ if (locknext)
71307+ vma_lock_anon_vma(vma->vm_next);
71308
71309 /*
71310 * vma->vm_start/vm_end cannot change under us because the caller
71311 * is required to hold the mmap_sem in read mode. We need the
71312- * anon_vma lock to serialize against concurrent expand_stacks.
71313- * Also guard against wrapping around to address 0.
71314+ * anon_vma locks to serialize against concurrent expand_stacks
71315+ * and expand_upwards.
71316 */
71317- if (address < PAGE_ALIGN(address+4))
71318- address = PAGE_ALIGN(address+4);
71319- else {
71320- vma_unlock_anon_vma(vma);
71321- return -ENOMEM;
71322- }
71323 error = 0;
71324
71325 /* Somebody else might have raced and expanded it already */
71326- if (address > vma->vm_end) {
71327+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
71328+ error = -ENOMEM;
71329+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
71330 unsigned long size, grow;
71331
71332 size = address - vma->vm_start;
71333@@ -1773,6 +2034,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71334 }
71335 }
71336 }
71337+ if (locknext)
71338+ vma_unlock_anon_vma(vma->vm_next);
71339 vma_unlock_anon_vma(vma);
71340 khugepaged_enter_vma_merge(vma);
71341 return error;
71342@@ -1786,6 +2049,8 @@ int expand_downwards(struct vm_area_struct *vma,
71343 unsigned long address)
71344 {
71345 int error;
71346+ bool lockprev = false;
71347+ struct vm_area_struct *prev;
71348
71349 /*
71350 * We must make sure the anon_vma is allocated
71351@@ -1799,6 +2064,15 @@ int expand_downwards(struct vm_area_struct *vma,
71352 if (error)
71353 return error;
71354
71355+ prev = vma->vm_prev;
71356+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
71357+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
71358+#endif
71359+ if (lockprev && anon_vma_prepare(prev))
71360+ return -ENOMEM;
71361+ if (lockprev)
71362+ vma_lock_anon_vma(prev);
71363+
71364 vma_lock_anon_vma(vma);
71365
71366 /*
71367@@ -1808,9 +2082,17 @@ int expand_downwards(struct vm_area_struct *vma,
71368 */
71369
71370 /* Somebody else might have raced and expanded it already */
71371- if (address < vma->vm_start) {
71372+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
71373+ error = -ENOMEM;
71374+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
71375 unsigned long size, grow;
71376
71377+#ifdef CONFIG_PAX_SEGMEXEC
71378+ struct vm_area_struct *vma_m;
71379+
71380+ vma_m = pax_find_mirror_vma(vma);
71381+#endif
71382+
71383 size = vma->vm_end - address;
71384 grow = (vma->vm_start - address) >> PAGE_SHIFT;
71385
71386@@ -1820,11 +2102,22 @@ int expand_downwards(struct vm_area_struct *vma,
71387 if (!error) {
71388 vma->vm_start = address;
71389 vma->vm_pgoff -= grow;
71390+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
71391+
71392+#ifdef CONFIG_PAX_SEGMEXEC
71393+ if (vma_m) {
71394+ vma_m->vm_start -= grow << PAGE_SHIFT;
71395+ vma_m->vm_pgoff -= grow;
71396+ }
71397+#endif
71398+
71399 perf_event_mmap(vma);
71400 }
71401 }
71402 }
71403 vma_unlock_anon_vma(vma);
71404+ if (lockprev)
71405+ vma_unlock_anon_vma(prev);
71406 khugepaged_enter_vma_merge(vma);
71407 return error;
71408 }
71409@@ -1894,6 +2187,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
71410 do {
71411 long nrpages = vma_pages(vma);
71412
71413+#ifdef CONFIG_PAX_SEGMEXEC
71414+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
71415+ vma = remove_vma(vma);
71416+ continue;
71417+ }
71418+#endif
71419+
71420 mm->total_vm -= nrpages;
71421 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
71422 vma = remove_vma(vma);
71423@@ -1939,6 +2239,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
71424 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
71425 vma->vm_prev = NULL;
71426 do {
71427+
71428+#ifdef CONFIG_PAX_SEGMEXEC
71429+ if (vma->vm_mirror) {
71430+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
71431+ vma->vm_mirror->vm_mirror = NULL;
71432+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
71433+ vma->vm_mirror = NULL;
71434+ }
71435+#endif
71436+
71437 rb_erase(&vma->vm_rb, &mm->mm_rb);
71438 mm->map_count--;
71439 tail_vma = vma;
71440@@ -1967,14 +2277,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71441 struct vm_area_struct *new;
71442 int err = -ENOMEM;
71443
71444+#ifdef CONFIG_PAX_SEGMEXEC
71445+ struct vm_area_struct *vma_m, *new_m = NULL;
71446+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
71447+#endif
71448+
71449 if (is_vm_hugetlb_page(vma) && (addr &
71450 ~(huge_page_mask(hstate_vma(vma)))))
71451 return -EINVAL;
71452
71453+#ifdef CONFIG_PAX_SEGMEXEC
71454+ vma_m = pax_find_mirror_vma(vma);
71455+#endif
71456+
71457 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71458 if (!new)
71459 goto out_err;
71460
71461+#ifdef CONFIG_PAX_SEGMEXEC
71462+ if (vma_m) {
71463+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71464+ if (!new_m) {
71465+ kmem_cache_free(vm_area_cachep, new);
71466+ goto out_err;
71467+ }
71468+ }
71469+#endif
71470+
71471 /* most fields are the same, copy all, and then fixup */
71472 *new = *vma;
71473
71474@@ -1987,6 +2316,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71475 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
71476 }
71477
71478+#ifdef CONFIG_PAX_SEGMEXEC
71479+ if (vma_m) {
71480+ *new_m = *vma_m;
71481+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
71482+ new_m->vm_mirror = new;
71483+ new->vm_mirror = new_m;
71484+
71485+ if (new_below)
71486+ new_m->vm_end = addr_m;
71487+ else {
71488+ new_m->vm_start = addr_m;
71489+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
71490+ }
71491+ }
71492+#endif
71493+
71494 pol = mpol_dup(vma_policy(vma));
71495 if (IS_ERR(pol)) {
71496 err = PTR_ERR(pol);
71497@@ -2012,6 +2357,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71498 else
71499 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
71500
71501+#ifdef CONFIG_PAX_SEGMEXEC
71502+ if (!err && vma_m) {
71503+ if (anon_vma_clone(new_m, vma_m))
71504+ goto out_free_mpol;
71505+
71506+ mpol_get(pol);
71507+ vma_set_policy(new_m, pol);
71508+
71509+ if (new_m->vm_file) {
71510+ get_file(new_m->vm_file);
71511+ if (vma_m->vm_flags & VM_EXECUTABLE)
71512+ added_exe_file_vma(mm);
71513+ }
71514+
71515+ if (new_m->vm_ops && new_m->vm_ops->open)
71516+ new_m->vm_ops->open(new_m);
71517+
71518+ if (new_below)
71519+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
71520+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
71521+ else
71522+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
71523+
71524+ if (err) {
71525+ if (new_m->vm_ops && new_m->vm_ops->close)
71526+ new_m->vm_ops->close(new_m);
71527+ if (new_m->vm_file) {
71528+ if (vma_m->vm_flags & VM_EXECUTABLE)
71529+ removed_exe_file_vma(mm);
71530+ fput(new_m->vm_file);
71531+ }
71532+ mpol_put(pol);
71533+ }
71534+ }
71535+#endif
71536+
71537 /* Success. */
71538 if (!err)
71539 return 0;
71540@@ -2024,10 +2405,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71541 removed_exe_file_vma(mm);
71542 fput(new->vm_file);
71543 }
71544- unlink_anon_vmas(new);
71545 out_free_mpol:
71546 mpol_put(pol);
71547 out_free_vma:
71548+
71549+#ifdef CONFIG_PAX_SEGMEXEC
71550+ if (new_m) {
71551+ unlink_anon_vmas(new_m);
71552+ kmem_cache_free(vm_area_cachep, new_m);
71553+ }
71554+#endif
71555+
71556+ unlink_anon_vmas(new);
71557 kmem_cache_free(vm_area_cachep, new);
71558 out_err:
71559 return err;
71560@@ -2040,6 +2429,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71561 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71562 unsigned long addr, int new_below)
71563 {
71564+
71565+#ifdef CONFIG_PAX_SEGMEXEC
71566+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71567+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
71568+ if (mm->map_count >= sysctl_max_map_count-1)
71569+ return -ENOMEM;
71570+ } else
71571+#endif
71572+
71573 if (mm->map_count >= sysctl_max_map_count)
71574 return -ENOMEM;
71575
71576@@ -2051,11 +2449,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71577 * work. This now handles partial unmappings.
71578 * Jeremy Fitzhardinge <jeremy@goop.org>
71579 */
71580+#ifdef CONFIG_PAX_SEGMEXEC
71581 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71582 {
71583+ int ret = __do_munmap(mm, start, len);
71584+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
71585+ return ret;
71586+
71587+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
71588+}
71589+
71590+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71591+#else
71592+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71593+#endif
71594+{
71595 unsigned long end;
71596 struct vm_area_struct *vma, *prev, *last;
71597
71598+ /*
71599+ * mm->mmap_sem is required to protect against another thread
71600+ * changing the mappings in case we sleep.
71601+ */
71602+ verify_mm_writelocked(mm);
71603+
71604 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
71605 return -EINVAL;
71606
71607@@ -2130,6 +2547,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71608 /* Fix up all other VM information */
71609 remove_vma_list(mm, vma);
71610
71611+ track_exec_limit(mm, start, end, 0UL);
71612+
71613 return 0;
71614 }
71615 EXPORT_SYMBOL(do_munmap);
71616@@ -2139,6 +2558,13 @@ int vm_munmap(unsigned long start, size_t len)
71617 int ret;
71618 struct mm_struct *mm = current->mm;
71619
71620+
71621+#ifdef CONFIG_PAX_SEGMEXEC
71622+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
71623+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
71624+ return -EINVAL;
71625+#endif
71626+
71627 down_write(&mm->mmap_sem);
71628 ret = do_munmap(mm, start, len);
71629 up_write(&mm->mmap_sem);
71630@@ -2152,16 +2578,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
71631 return vm_munmap(addr, len);
71632 }
71633
71634-static inline void verify_mm_writelocked(struct mm_struct *mm)
71635-{
71636-#ifdef CONFIG_DEBUG_VM
71637- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
71638- WARN_ON(1);
71639- up_read(&mm->mmap_sem);
71640- }
71641-#endif
71642-}
71643-
71644 /*
71645 * this is really a simplified "do_mmap". it only handles
71646 * anonymous maps. eventually we may be able to do some
71647@@ -2175,6 +2591,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71648 struct rb_node ** rb_link, * rb_parent;
71649 pgoff_t pgoff = addr >> PAGE_SHIFT;
71650 int error;
71651+ unsigned long charged;
71652
71653 len = PAGE_ALIGN(len);
71654 if (!len)
71655@@ -2186,16 +2603,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71656
71657 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
71658
71659+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
71660+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
71661+ flags &= ~VM_EXEC;
71662+
71663+#ifdef CONFIG_PAX_MPROTECT
71664+ if (mm->pax_flags & MF_PAX_MPROTECT)
71665+ flags &= ~VM_MAYEXEC;
71666+#endif
71667+
71668+ }
71669+#endif
71670+
71671 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
71672 if (error & ~PAGE_MASK)
71673 return error;
71674
71675+ charged = len >> PAGE_SHIFT;
71676+
71677 /*
71678 * mlock MCL_FUTURE?
71679 */
71680 if (mm->def_flags & VM_LOCKED) {
71681 unsigned long locked, lock_limit;
71682- locked = len >> PAGE_SHIFT;
71683+ locked = charged;
71684 locked += mm->locked_vm;
71685 lock_limit = rlimit(RLIMIT_MEMLOCK);
71686 lock_limit >>= PAGE_SHIFT;
71687@@ -2212,22 +2643,22 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71688 /*
71689 * Clear old maps. this also does some error checking for us
71690 */
71691- munmap_back:
71692 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71693 if (vma && vma->vm_start < addr + len) {
71694 if (do_munmap(mm, addr, len))
71695 return -ENOMEM;
71696- goto munmap_back;
71697- }
71698+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71699+ BUG_ON(vma && vma->vm_start < addr + len);
71700+ }
71701
71702 /* Check against address space limits *after* clearing old maps... */
71703- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
71704+ if (!may_expand_vm(mm, charged))
71705 return -ENOMEM;
71706
71707 if (mm->map_count > sysctl_max_map_count)
71708 return -ENOMEM;
71709
71710- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
71711+ if (security_vm_enough_memory_mm(mm, charged))
71712 return -ENOMEM;
71713
71714 /* Can we just expand an old private anonymous mapping? */
71715@@ -2241,7 +2672,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71716 */
71717 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71718 if (!vma) {
71719- vm_unacct_memory(len >> PAGE_SHIFT);
71720+ vm_unacct_memory(charged);
71721 return -ENOMEM;
71722 }
71723
71724@@ -2255,11 +2686,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71725 vma_link(mm, vma, prev, rb_link, rb_parent);
71726 out:
71727 perf_event_mmap(vma);
71728- mm->total_vm += len >> PAGE_SHIFT;
71729+ mm->total_vm += charged;
71730 if (flags & VM_LOCKED) {
71731 if (!mlock_vma_pages_range(vma, addr, addr + len))
71732- mm->locked_vm += (len >> PAGE_SHIFT);
71733+ mm->locked_vm += charged;
71734 }
71735+ track_exec_limit(mm, addr, addr + len, flags);
71736 return addr;
71737 }
71738
71739@@ -2315,8 +2747,10 @@ void exit_mmap(struct mm_struct *mm)
71740 * Walk the list again, actually closing and freeing it,
71741 * with preemption enabled, without holding any MM locks.
71742 */
71743- while (vma)
71744+ while (vma) {
71745+ vma->vm_mirror = NULL;
71746 vma = remove_vma(vma);
71747+ }
71748
71749 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
71750 }
71751@@ -2330,6 +2764,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
71752 struct vm_area_struct * __vma, * prev;
71753 struct rb_node ** rb_link, * rb_parent;
71754
71755+#ifdef CONFIG_PAX_SEGMEXEC
71756+ struct vm_area_struct *vma_m = NULL;
71757+#endif
71758+
71759+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
71760+ return -EPERM;
71761+
71762 /*
71763 * The vm_pgoff of a purely anonymous vma should be irrelevant
71764 * until its first write fault, when page's anon_vma and index
71765@@ -2352,7 +2793,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
71766 if ((vma->vm_flags & VM_ACCOUNT) &&
71767 security_vm_enough_memory_mm(mm, vma_pages(vma)))
71768 return -ENOMEM;
71769+
71770+#ifdef CONFIG_PAX_SEGMEXEC
71771+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
71772+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71773+ if (!vma_m)
71774+ return -ENOMEM;
71775+ }
71776+#endif
71777+
71778 vma_link(mm, vma, prev, rb_link, rb_parent);
71779+
71780+#ifdef CONFIG_PAX_SEGMEXEC
71781+ if (vma_m)
71782+ BUG_ON(pax_mirror_vma(vma_m, vma));
71783+#endif
71784+
71785 return 0;
71786 }
71787
71788@@ -2371,6 +2827,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
71789 struct mempolicy *pol;
71790 bool faulted_in_anon_vma = true;
71791
71792+ BUG_ON(vma->vm_mirror);
71793+
71794 /*
71795 * If anonymous vma has not yet been faulted, update new pgoff
71796 * to match new location, to increase its chance of merging.
71797@@ -2438,6 +2896,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
71798 return NULL;
71799 }
71800
71801+#ifdef CONFIG_PAX_SEGMEXEC
71802+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
71803+{
71804+ struct vm_area_struct *prev_m;
71805+ struct rb_node **rb_link_m, *rb_parent_m;
71806+ struct mempolicy *pol_m;
71807+
71808+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
71809+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
71810+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
71811+ *vma_m = *vma;
71812+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
71813+ if (anon_vma_clone(vma_m, vma))
71814+ return -ENOMEM;
71815+ pol_m = vma_policy(vma_m);
71816+ mpol_get(pol_m);
71817+ vma_set_policy(vma_m, pol_m);
71818+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
71819+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
71820+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
71821+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
71822+ if (vma_m->vm_file)
71823+ get_file(vma_m->vm_file);
71824+ if (vma_m->vm_ops && vma_m->vm_ops->open)
71825+ vma_m->vm_ops->open(vma_m);
71826+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
71827+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
71828+ vma_m->vm_mirror = vma;
71829+ vma->vm_mirror = vma_m;
71830+ return 0;
71831+}
71832+#endif
71833+
71834 /*
71835 * Return true if the calling process may expand its vm space by the passed
71836 * number of pages
71837@@ -2449,6 +2940,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
71838
71839 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
71840
71841+#ifdef CONFIG_PAX_RANDMMAP
71842+ if (mm->pax_flags & MF_PAX_RANDMMAP)
71843+ cur -= mm->brk_gap;
71844+#endif
71845+
71846+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
71847 if (cur + npages > lim)
71848 return 0;
71849 return 1;
71850@@ -2519,6 +3016,22 @@ int install_special_mapping(struct mm_struct *mm,
71851 vma->vm_start = addr;
71852 vma->vm_end = addr + len;
71853
71854+#ifdef CONFIG_PAX_MPROTECT
71855+ if (mm->pax_flags & MF_PAX_MPROTECT) {
71856+#ifndef CONFIG_PAX_MPROTECT_COMPAT
71857+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
71858+ return -EPERM;
71859+ if (!(vm_flags & VM_EXEC))
71860+ vm_flags &= ~VM_MAYEXEC;
71861+#else
71862+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
71863+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
71864+#endif
71865+ else
71866+ vm_flags &= ~VM_MAYWRITE;
71867+ }
71868+#endif
71869+
71870 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
71871 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
71872
71873diff --git a/mm/mprotect.c b/mm/mprotect.c
71874index a409926..8b32e6d 100644
71875--- a/mm/mprotect.c
71876+++ b/mm/mprotect.c
71877@@ -23,10 +23,17 @@
71878 #include <linux/mmu_notifier.h>
71879 #include <linux/migrate.h>
71880 #include <linux/perf_event.h>
71881+
71882+#ifdef CONFIG_PAX_MPROTECT
71883+#include <linux/elf.h>
71884+#include <linux/binfmts.h>
71885+#endif
71886+
71887 #include <asm/uaccess.h>
71888 #include <asm/pgtable.h>
71889 #include <asm/cacheflush.h>
71890 #include <asm/tlbflush.h>
71891+#include <asm/mmu_context.h>
71892
71893 #ifndef pgprot_modify
71894 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
71895@@ -141,6 +148,48 @@ static void change_protection(struct vm_area_struct *vma,
71896 flush_tlb_range(vma, start, end);
71897 }
71898
71899+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
71900+/* called while holding the mmap semaphor for writing except stack expansion */
71901+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
71902+{
71903+ unsigned long oldlimit, newlimit = 0UL;
71904+
71905+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
71906+ return;
71907+
71908+ spin_lock(&mm->page_table_lock);
71909+ oldlimit = mm->context.user_cs_limit;
71910+ if ((prot & VM_EXEC) && oldlimit < end)
71911+ /* USER_CS limit moved up */
71912+ newlimit = end;
71913+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
71914+ /* USER_CS limit moved down */
71915+ newlimit = start;
71916+
71917+ if (newlimit) {
71918+ mm->context.user_cs_limit = newlimit;
71919+
71920+#ifdef CONFIG_SMP
71921+ wmb();
71922+ cpus_clear(mm->context.cpu_user_cs_mask);
71923+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
71924+#endif
71925+
71926+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
71927+ }
71928+ spin_unlock(&mm->page_table_lock);
71929+ if (newlimit == end) {
71930+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
71931+
71932+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
71933+ if (is_vm_hugetlb_page(vma))
71934+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
71935+ else
71936+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
71937+ }
71938+}
71939+#endif
71940+
71941 int
71942 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71943 unsigned long start, unsigned long end, unsigned long newflags)
71944@@ -153,11 +202,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71945 int error;
71946 int dirty_accountable = 0;
71947
71948+#ifdef CONFIG_PAX_SEGMEXEC
71949+ struct vm_area_struct *vma_m = NULL;
71950+ unsigned long start_m, end_m;
71951+
71952+ start_m = start + SEGMEXEC_TASK_SIZE;
71953+ end_m = end + SEGMEXEC_TASK_SIZE;
71954+#endif
71955+
71956 if (newflags == oldflags) {
71957 *pprev = vma;
71958 return 0;
71959 }
71960
71961+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
71962+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
71963+
71964+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
71965+ return -ENOMEM;
71966+
71967+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
71968+ return -ENOMEM;
71969+ }
71970+
71971 /*
71972 * If we make a private mapping writable we increase our commit;
71973 * but (without finer accounting) cannot reduce our commit if we
71974@@ -174,6 +241,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71975 }
71976 }
71977
71978+#ifdef CONFIG_PAX_SEGMEXEC
71979+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
71980+ if (start != vma->vm_start) {
71981+ error = split_vma(mm, vma, start, 1);
71982+ if (error)
71983+ goto fail;
71984+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
71985+ *pprev = (*pprev)->vm_next;
71986+ }
71987+
71988+ if (end != vma->vm_end) {
71989+ error = split_vma(mm, vma, end, 0);
71990+ if (error)
71991+ goto fail;
71992+ }
71993+
71994+ if (pax_find_mirror_vma(vma)) {
71995+ error = __do_munmap(mm, start_m, end_m - start_m);
71996+ if (error)
71997+ goto fail;
71998+ } else {
71999+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72000+ if (!vma_m) {
72001+ error = -ENOMEM;
72002+ goto fail;
72003+ }
72004+ vma->vm_flags = newflags;
72005+ error = pax_mirror_vma(vma_m, vma);
72006+ if (error) {
72007+ vma->vm_flags = oldflags;
72008+ goto fail;
72009+ }
72010+ }
72011+ }
72012+#endif
72013+
72014 /*
72015 * First try to merge with previous and/or next vma.
72016 */
72017@@ -204,9 +307,21 @@ success:
72018 * vm_flags and vm_page_prot are protected by the mmap_sem
72019 * held in write mode.
72020 */
72021+
72022+#ifdef CONFIG_PAX_SEGMEXEC
72023+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
72024+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
72025+#endif
72026+
72027 vma->vm_flags = newflags;
72028+
72029+#ifdef CONFIG_PAX_MPROTECT
72030+ if (mm->binfmt && mm->binfmt->handle_mprotect)
72031+ mm->binfmt->handle_mprotect(vma, newflags);
72032+#endif
72033+
72034 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
72035- vm_get_page_prot(newflags));
72036+ vm_get_page_prot(vma->vm_flags));
72037
72038 if (vma_wants_writenotify(vma)) {
72039 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
72040@@ -248,6 +363,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72041 end = start + len;
72042 if (end <= start)
72043 return -ENOMEM;
72044+
72045+#ifdef CONFIG_PAX_SEGMEXEC
72046+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
72047+ if (end > SEGMEXEC_TASK_SIZE)
72048+ return -EINVAL;
72049+ } else
72050+#endif
72051+
72052+ if (end > TASK_SIZE)
72053+ return -EINVAL;
72054+
72055 if (!arch_validate_prot(prot))
72056 return -EINVAL;
72057
72058@@ -255,7 +381,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72059 /*
72060 * Does the application expect PROT_READ to imply PROT_EXEC:
72061 */
72062- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
72063+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
72064 prot |= PROT_EXEC;
72065
72066 vm_flags = calc_vm_prot_bits(prot);
72067@@ -288,6 +414,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72068 if (start > vma->vm_start)
72069 prev = vma;
72070
72071+#ifdef CONFIG_PAX_MPROTECT
72072+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
72073+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
72074+#endif
72075+
72076 for (nstart = start ; ; ) {
72077 unsigned long newflags;
72078
72079@@ -297,6 +428,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72080
72081 /* newflags >> 4 shift VM_MAY% in place of VM_% */
72082 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
72083+ if (prot & (PROT_WRITE | PROT_EXEC))
72084+ gr_log_rwxmprotect(vma->vm_file);
72085+
72086+ error = -EACCES;
72087+ goto out;
72088+ }
72089+
72090+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
72091 error = -EACCES;
72092 goto out;
72093 }
72094@@ -311,6 +450,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72095 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
72096 if (error)
72097 goto out;
72098+
72099+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
72100+
72101 nstart = tmp;
72102
72103 if (nstart < prev->vm_end)
72104diff --git a/mm/mremap.c b/mm/mremap.c
72105index db8d983..76506cb 100644
72106--- a/mm/mremap.c
72107+++ b/mm/mremap.c
72108@@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
72109 continue;
72110 pte = ptep_get_and_clear(mm, old_addr, old_pte);
72111 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
72112+
72113+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72114+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
72115+ pte = pte_exprotect(pte);
72116+#endif
72117+
72118 set_pte_at(mm, new_addr, new_pte, pte);
72119 }
72120
72121@@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
72122 if (is_vm_hugetlb_page(vma))
72123 goto Einval;
72124
72125+#ifdef CONFIG_PAX_SEGMEXEC
72126+ if (pax_find_mirror_vma(vma))
72127+ goto Einval;
72128+#endif
72129+
72130 /* We can't remap across vm area boundaries */
72131 if (old_len > vma->vm_end - addr)
72132 goto Efault;
72133@@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned long addr,
72134 unsigned long ret = -EINVAL;
72135 unsigned long charged = 0;
72136 unsigned long map_flags;
72137+ unsigned long pax_task_size = TASK_SIZE;
72138
72139 if (new_addr & ~PAGE_MASK)
72140 goto out;
72141
72142- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
72143+#ifdef CONFIG_PAX_SEGMEXEC
72144+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
72145+ pax_task_size = SEGMEXEC_TASK_SIZE;
72146+#endif
72147+
72148+ pax_task_size -= PAGE_SIZE;
72149+
72150+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
72151 goto out;
72152
72153 /* Check if the location we're moving into overlaps the
72154 * old location at all, and fail if it does.
72155 */
72156- if ((new_addr <= addr) && (new_addr+new_len) > addr)
72157- goto out;
72158-
72159- if ((addr <= new_addr) && (addr+old_len) > new_addr)
72160+ if (addr + old_len > new_addr && new_addr + new_len > addr)
72161 goto out;
72162
72163 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72164@@ -440,6 +456,7 @@ unsigned long do_mremap(unsigned long addr,
72165 struct vm_area_struct *vma;
72166 unsigned long ret = -EINVAL;
72167 unsigned long charged = 0;
72168+ unsigned long pax_task_size = TASK_SIZE;
72169
72170 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
72171 goto out;
72172@@ -458,6 +475,17 @@ unsigned long do_mremap(unsigned long addr,
72173 if (!new_len)
72174 goto out;
72175
72176+#ifdef CONFIG_PAX_SEGMEXEC
72177+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
72178+ pax_task_size = SEGMEXEC_TASK_SIZE;
72179+#endif
72180+
72181+ pax_task_size -= PAGE_SIZE;
72182+
72183+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
72184+ old_len > pax_task_size || addr > pax_task_size-old_len)
72185+ goto out;
72186+
72187 if (flags & MREMAP_FIXED) {
72188 if (flags & MREMAP_MAYMOVE)
72189 ret = mremap_to(addr, old_len, new_addr, new_len);
72190@@ -507,6 +535,7 @@ unsigned long do_mremap(unsigned long addr,
72191 addr + new_len);
72192 }
72193 ret = addr;
72194+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
72195 goto out;
72196 }
72197 }
72198@@ -533,7 +562,13 @@ unsigned long do_mremap(unsigned long addr,
72199 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72200 if (ret)
72201 goto out;
72202+
72203+ map_flags = vma->vm_flags;
72204 ret = move_vma(vma, addr, old_len, new_len, new_addr);
72205+ if (!(ret & ~PAGE_MASK)) {
72206+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
72207+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
72208+ }
72209 }
72210 out:
72211 if (ret & ~PAGE_MASK)
72212diff --git a/mm/nommu.c b/mm/nommu.c
72213index bb8f4f0..40d3e02 100644
72214--- a/mm/nommu.c
72215+++ b/mm/nommu.c
72216@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
72217 int sysctl_overcommit_ratio = 50; /* default is 50% */
72218 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
72219 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
72220-int heap_stack_gap = 0;
72221
72222 atomic_long_t mmap_pages_allocated;
72223
72224@@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
72225 EXPORT_SYMBOL(find_vma);
72226
72227 /*
72228- * find a VMA
72229- * - we don't extend stack VMAs under NOMMU conditions
72230- */
72231-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
72232-{
72233- return find_vma(mm, addr);
72234-}
72235-
72236-/*
72237 * expand a stack to a given address
72238 * - not supported under NOMMU conditions
72239 */
72240@@ -1580,6 +1570,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72241
72242 /* most fields are the same, copy all, and then fixup */
72243 *new = *vma;
72244+ INIT_LIST_HEAD(&new->anon_vma_chain);
72245 *region = *vma->vm_region;
72246 new->vm_region = region;
72247
72248diff --git a/mm/page_alloc.c b/mm/page_alloc.c
72249index 918330f..ae99ae1 100644
72250--- a/mm/page_alloc.c
72251+++ b/mm/page_alloc.c
72252@@ -335,7 +335,7 @@ out:
72253 * This usage means that zero-order pages may not be compound.
72254 */
72255
72256-static void free_compound_page(struct page *page)
72257+void free_compound_page(struct page *page)
72258 {
72259 __free_pages_ok(page, compound_order(page));
72260 }
72261@@ -692,6 +692,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72262 int i;
72263 int bad = 0;
72264
72265+#ifdef CONFIG_PAX_MEMORY_SANITIZE
72266+ unsigned long index = 1UL << order;
72267+#endif
72268+
72269 trace_mm_page_free(page, order);
72270 kmemcheck_free_shadow(page, order);
72271
72272@@ -707,6 +711,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72273 debug_check_no_obj_freed(page_address(page),
72274 PAGE_SIZE << order);
72275 }
72276+
72277+#ifdef CONFIG_PAX_MEMORY_SANITIZE
72278+ for (; index; --index)
72279+ sanitize_highpage(page + index - 1);
72280+#endif
72281+
72282 arch_free_page(page, order);
72283 kernel_map_pages(page, 1 << order, 0);
72284
72285@@ -830,8 +840,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
72286 arch_alloc_page(page, order);
72287 kernel_map_pages(page, 1 << order, 1);
72288
72289+#ifndef CONFIG_PAX_MEMORY_SANITIZE
72290 if (gfp_flags & __GFP_ZERO)
72291 prep_zero_page(page, order, gfp_flags);
72292+#endif
72293
72294 if (order && (gfp_flags & __GFP_COMP))
72295 prep_compound_page(page, order);
72296@@ -3523,7 +3535,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
72297 unsigned long pfn;
72298
72299 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
72300+#ifdef CONFIG_X86_32
72301+ /* boot failures in VMware 8 on 32bit vanilla since
72302+ this change */
72303+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
72304+#else
72305 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
72306+#endif
72307 return 1;
72308 }
72309 return 0;
72310diff --git a/mm/percpu.c b/mm/percpu.c
72311index bb4be74..a43ea85 100644
72312--- a/mm/percpu.c
72313+++ b/mm/percpu.c
72314@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
72315 static unsigned int pcpu_high_unit_cpu __read_mostly;
72316
72317 /* the address of the first chunk which starts with the kernel static area */
72318-void *pcpu_base_addr __read_mostly;
72319+void *pcpu_base_addr __read_only;
72320 EXPORT_SYMBOL_GPL(pcpu_base_addr);
72321
72322 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
72323diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
72324index c20ff48..137702a 100644
72325--- a/mm/process_vm_access.c
72326+++ b/mm/process_vm_access.c
72327@@ -13,6 +13,7 @@
72328 #include <linux/uio.h>
72329 #include <linux/sched.h>
72330 #include <linux/highmem.h>
72331+#include <linux/security.h>
72332 #include <linux/ptrace.h>
72333 #include <linux/slab.h>
72334 #include <linux/syscalls.h>
72335@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
72336 size_t iov_l_curr_offset = 0;
72337 ssize_t iov_len;
72338
72339+ return -ENOSYS; // PaX: until properly audited
72340+
72341 /*
72342 * Work out how many pages of struct pages we're going to need
72343 * when eventually calling get_user_pages
72344 */
72345 for (i = 0; i < riovcnt; i++) {
72346 iov_len = rvec[i].iov_len;
72347- if (iov_len > 0) {
72348- nr_pages_iov = ((unsigned long)rvec[i].iov_base
72349- + iov_len)
72350- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
72351- / PAGE_SIZE + 1;
72352- nr_pages = max(nr_pages, nr_pages_iov);
72353- }
72354+ if (iov_len <= 0)
72355+ continue;
72356+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
72357+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
72358+ nr_pages = max(nr_pages, nr_pages_iov);
72359 }
72360
72361 if (nr_pages == 0)
72362@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
72363 goto free_proc_pages;
72364 }
72365
72366+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
72367+ rc = -EPERM;
72368+ goto put_task_struct;
72369+ }
72370+
72371 mm = mm_access(task, PTRACE_MODE_ATTACH);
72372 if (!mm || IS_ERR(mm)) {
72373 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
72374diff --git a/mm/rmap.c b/mm/rmap.c
72375index 5b5ad58..0f77903 100644
72376--- a/mm/rmap.c
72377+++ b/mm/rmap.c
72378@@ -167,6 +167,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72379 struct anon_vma *anon_vma = vma->anon_vma;
72380 struct anon_vma_chain *avc;
72381
72382+#ifdef CONFIG_PAX_SEGMEXEC
72383+ struct anon_vma_chain *avc_m = NULL;
72384+#endif
72385+
72386 might_sleep();
72387 if (unlikely(!anon_vma)) {
72388 struct mm_struct *mm = vma->vm_mm;
72389@@ -176,6 +180,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72390 if (!avc)
72391 goto out_enomem;
72392
72393+#ifdef CONFIG_PAX_SEGMEXEC
72394+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
72395+ if (!avc_m)
72396+ goto out_enomem_free_avc;
72397+#endif
72398+
72399 anon_vma = find_mergeable_anon_vma(vma);
72400 allocated = NULL;
72401 if (!anon_vma) {
72402@@ -189,6 +199,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72403 /* page_table_lock to protect against threads */
72404 spin_lock(&mm->page_table_lock);
72405 if (likely(!vma->anon_vma)) {
72406+
72407+#ifdef CONFIG_PAX_SEGMEXEC
72408+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
72409+
72410+ if (vma_m) {
72411+ BUG_ON(vma_m->anon_vma);
72412+ vma_m->anon_vma = anon_vma;
72413+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
72414+ avc_m = NULL;
72415+ }
72416+#endif
72417+
72418 vma->anon_vma = anon_vma;
72419 anon_vma_chain_link(vma, avc, anon_vma);
72420 allocated = NULL;
72421@@ -199,12 +221,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72422
72423 if (unlikely(allocated))
72424 put_anon_vma(allocated);
72425+
72426+#ifdef CONFIG_PAX_SEGMEXEC
72427+ if (unlikely(avc_m))
72428+ anon_vma_chain_free(avc_m);
72429+#endif
72430+
72431 if (unlikely(avc))
72432 anon_vma_chain_free(avc);
72433 }
72434 return 0;
72435
72436 out_enomem_free_avc:
72437+
72438+#ifdef CONFIG_PAX_SEGMEXEC
72439+ if (avc_m)
72440+ anon_vma_chain_free(avc_m);
72441+#endif
72442+
72443 anon_vma_chain_free(avc);
72444 out_enomem:
72445 return -ENOMEM;
72446@@ -240,7 +274,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
72447 * Attach the anon_vmas from src to dst.
72448 * Returns 0 on success, -ENOMEM on failure.
72449 */
72450-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
72451+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
72452 {
72453 struct anon_vma_chain *avc, *pavc;
72454 struct anon_vma *root = NULL;
72455@@ -318,7 +352,7 @@ void anon_vma_moveto_tail(struct vm_area_struct *dst)
72456 * the corresponding VMA in the parent process is attached to.
72457 * Returns 0 on success, non-zero on failure.
72458 */
72459-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
72460+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
72461 {
72462 struct anon_vma_chain *avc;
72463 struct anon_vma *anon_vma;
72464diff --git a/mm/shmem.c b/mm/shmem.c
72465index f99ff3e..faea8b6 100644
72466--- a/mm/shmem.c
72467+++ b/mm/shmem.c
72468@@ -31,7 +31,7 @@
72469 #include <linux/export.h>
72470 #include <linux/swap.h>
72471
72472-static struct vfsmount *shm_mnt;
72473+struct vfsmount *shm_mnt;
72474
72475 #ifdef CONFIG_SHMEM
72476 /*
72477@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
72478 #define BOGO_DIRENT_SIZE 20
72479
72480 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
72481-#define SHORT_SYMLINK_LEN 128
72482+#define SHORT_SYMLINK_LEN 64
72483
72484 struct shmem_xattr {
72485 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
72486@@ -2235,8 +2235,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
72487 int err = -ENOMEM;
72488
72489 /* Round up to L1_CACHE_BYTES to resist false sharing */
72490- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
72491- L1_CACHE_BYTES), GFP_KERNEL);
72492+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
72493 if (!sbinfo)
72494 return -ENOMEM;
72495
72496diff --git a/mm/slab.c b/mm/slab.c
72497index e901a36..ee8fe97 100644
72498--- a/mm/slab.c
72499+++ b/mm/slab.c
72500@@ -153,7 +153,7 @@
72501
72502 /* Legal flag mask for kmem_cache_create(). */
72503 #if DEBUG
72504-# define CREATE_MASK (SLAB_RED_ZONE | \
72505+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
72506 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
72507 SLAB_CACHE_DMA | \
72508 SLAB_STORE_USER | \
72509@@ -161,7 +161,7 @@
72510 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72511 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
72512 #else
72513-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
72514+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
72515 SLAB_CACHE_DMA | \
72516 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
72517 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72518@@ -290,7 +290,7 @@ struct kmem_list3 {
72519 * Need this for bootstrapping a per node allocator.
72520 */
72521 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
72522-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
72523+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
72524 #define CACHE_CACHE 0
72525 #define SIZE_AC MAX_NUMNODES
72526 #define SIZE_L3 (2 * MAX_NUMNODES)
72527@@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
72528 if ((x)->max_freeable < i) \
72529 (x)->max_freeable = i; \
72530 } while (0)
72531-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
72532-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
72533-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
72534-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
72535+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
72536+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
72537+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
72538+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
72539 #else
72540 #define STATS_INC_ACTIVE(x) do { } while (0)
72541 #define STATS_DEC_ACTIVE(x) do { } while (0)
72542@@ -542,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
72543 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
72544 */
72545 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
72546- const struct slab *slab, void *obj)
72547+ const struct slab *slab, const void *obj)
72548 {
72549 u32 offset = (obj - slab->s_mem);
72550 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
72551@@ -568,7 +568,7 @@ struct cache_names {
72552 static struct cache_names __initdata cache_names[] = {
72553 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
72554 #include <linux/kmalloc_sizes.h>
72555- {NULL,}
72556+ {NULL}
72557 #undef CACHE
72558 };
72559
72560@@ -1588,7 +1588,7 @@ void __init kmem_cache_init(void)
72561 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
72562 sizes[INDEX_AC].cs_size,
72563 ARCH_KMALLOC_MINALIGN,
72564- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72565+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72566 NULL);
72567
72568 if (INDEX_AC != INDEX_L3) {
72569@@ -1596,7 +1596,7 @@ void __init kmem_cache_init(void)
72570 kmem_cache_create(names[INDEX_L3].name,
72571 sizes[INDEX_L3].cs_size,
72572 ARCH_KMALLOC_MINALIGN,
72573- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72574+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72575 NULL);
72576 }
72577
72578@@ -1614,7 +1614,7 @@ void __init kmem_cache_init(void)
72579 sizes->cs_cachep = kmem_cache_create(names->name,
72580 sizes->cs_size,
72581 ARCH_KMALLOC_MINALIGN,
72582- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72583+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72584 NULL);
72585 }
72586 #ifdef CONFIG_ZONE_DMA
72587@@ -4390,10 +4390,10 @@ static int s_show(struct seq_file *m, void *p)
72588 }
72589 /* cpu stats */
72590 {
72591- unsigned long allochit = atomic_read(&cachep->allochit);
72592- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
72593- unsigned long freehit = atomic_read(&cachep->freehit);
72594- unsigned long freemiss = atomic_read(&cachep->freemiss);
72595+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
72596+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
72597+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
72598+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
72599
72600 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
72601 allochit, allocmiss, freehit, freemiss);
72602@@ -4652,13 +4652,62 @@ static int __init slab_proc_init(void)
72603 {
72604 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
72605 #ifdef CONFIG_DEBUG_SLAB_LEAK
72606- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
72607+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
72608 #endif
72609 return 0;
72610 }
72611 module_init(slab_proc_init);
72612 #endif
72613
72614+void check_object_size(const void *ptr, unsigned long n, bool to)
72615+{
72616+
72617+#ifdef CONFIG_PAX_USERCOPY
72618+ struct page *page;
72619+ struct kmem_cache *cachep = NULL;
72620+ struct slab *slabp;
72621+ unsigned int objnr;
72622+ unsigned long offset;
72623+ const char *type;
72624+
72625+ if (!n)
72626+ return;
72627+
72628+ type = "<null>";
72629+ if (ZERO_OR_NULL_PTR(ptr))
72630+ goto report;
72631+
72632+ if (!virt_addr_valid(ptr))
72633+ return;
72634+
72635+ page = virt_to_head_page(ptr);
72636+
72637+ type = "<process stack>";
72638+ if (!PageSlab(page)) {
72639+ if (object_is_on_stack(ptr, n) == -1)
72640+ goto report;
72641+ return;
72642+ }
72643+
72644+ cachep = page_get_cache(page);
72645+ type = cachep->name;
72646+ if (!(cachep->flags & SLAB_USERCOPY))
72647+ goto report;
72648+
72649+ slabp = page_get_slab(page);
72650+ objnr = obj_to_index(cachep, slabp, ptr);
72651+ BUG_ON(objnr >= cachep->num);
72652+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
72653+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
72654+ return;
72655+
72656+report:
72657+ pax_report_usercopy(ptr, n, to, type);
72658+#endif
72659+
72660+}
72661+EXPORT_SYMBOL(check_object_size);
72662+
72663 /**
72664 * ksize - get the actual amount of memory allocated for a given object
72665 * @objp: Pointer to the object
72666diff --git a/mm/slob.c b/mm/slob.c
72667index 8105be4..e045f96 100644
72668--- a/mm/slob.c
72669+++ b/mm/slob.c
72670@@ -29,7 +29,7 @@
72671 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
72672 * alloc_pages() directly, allocating compound pages so the page order
72673 * does not have to be separately tracked, and also stores the exact
72674- * allocation size in page->private so that it can be used to accurately
72675+ * allocation size in slob_page->size so that it can be used to accurately
72676 * provide ksize(). These objects are detected in kfree() because slob_page()
72677 * is false for them.
72678 *
72679@@ -58,6 +58,7 @@
72680 */
72681
72682 #include <linux/kernel.h>
72683+#include <linux/sched.h>
72684 #include <linux/slab.h>
72685 #include <linux/mm.h>
72686 #include <linux/swap.h> /* struct reclaim_state */
72687@@ -102,7 +103,8 @@ struct slob_page {
72688 unsigned long flags; /* mandatory */
72689 atomic_t _count; /* mandatory */
72690 slobidx_t units; /* free units left in page */
72691- unsigned long pad[2];
72692+ unsigned long pad[1];
72693+ unsigned long size; /* size when >=PAGE_SIZE */
72694 slob_t *free; /* first free slob_t in page */
72695 struct list_head list; /* linked list of free pages */
72696 };
72697@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
72698 */
72699 static inline int is_slob_page(struct slob_page *sp)
72700 {
72701- return PageSlab((struct page *)sp);
72702+ return PageSlab((struct page *)sp) && !sp->size;
72703 }
72704
72705 static inline void set_slob_page(struct slob_page *sp)
72706@@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
72707
72708 static inline struct slob_page *slob_page(const void *addr)
72709 {
72710- return (struct slob_page *)virt_to_page(addr);
72711+ return (struct slob_page *)virt_to_head_page(addr);
72712 }
72713
72714 /*
72715@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
72716 /*
72717 * Return the size of a slob block.
72718 */
72719-static slobidx_t slob_units(slob_t *s)
72720+static slobidx_t slob_units(const slob_t *s)
72721 {
72722 if (s->units > 0)
72723 return s->units;
72724@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
72725 /*
72726 * Return the next free slob block pointer after this one.
72727 */
72728-static slob_t *slob_next(slob_t *s)
72729+static slob_t *slob_next(const slob_t *s)
72730 {
72731 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
72732 slobidx_t next;
72733@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
72734 /*
72735 * Returns true if s is the last free block in its page.
72736 */
72737-static int slob_last(slob_t *s)
72738+static int slob_last(const slob_t *s)
72739 {
72740 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
72741 }
72742@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
72743 if (!page)
72744 return NULL;
72745
72746+ set_slob_page(page);
72747 return page_address(page);
72748 }
72749
72750@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
72751 if (!b)
72752 return NULL;
72753 sp = slob_page(b);
72754- set_slob_page(sp);
72755
72756 spin_lock_irqsave(&slob_lock, flags);
72757 sp->units = SLOB_UNITS(PAGE_SIZE);
72758 sp->free = b;
72759+ sp->size = 0;
72760 INIT_LIST_HEAD(&sp->list);
72761 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
72762 set_slob_page_free(sp, slob_list);
72763@@ -476,10 +479,9 @@ out:
72764 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
72765 */
72766
72767-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72768+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
72769 {
72770- unsigned int *m;
72771- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72772+ slob_t *m;
72773 void *ret;
72774
72775 gfp &= gfp_allowed_mask;
72776@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72777
72778 if (!m)
72779 return NULL;
72780- *m = size;
72781+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
72782+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
72783+ m[0].units = size;
72784+ m[1].units = align;
72785 ret = (void *)m + align;
72786
72787 trace_kmalloc_node(_RET_IP_, ret,
72788@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72789 gfp |= __GFP_COMP;
72790 ret = slob_new_pages(gfp, order, node);
72791 if (ret) {
72792- struct page *page;
72793- page = virt_to_page(ret);
72794- page->private = size;
72795+ struct slob_page *sp;
72796+ sp = slob_page(ret);
72797+ sp->size = size;
72798 }
72799
72800 trace_kmalloc_node(_RET_IP_, ret,
72801 size, PAGE_SIZE << order, gfp, node);
72802 }
72803
72804- kmemleak_alloc(ret, size, 1, gfp);
72805+ return ret;
72806+}
72807+
72808+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72809+{
72810+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72811+ void *ret = __kmalloc_node_align(size, gfp, node, align);
72812+
72813+ if (!ZERO_OR_NULL_PTR(ret))
72814+ kmemleak_alloc(ret, size, 1, gfp);
72815 return ret;
72816 }
72817 EXPORT_SYMBOL(__kmalloc_node);
72818@@ -533,13 +547,92 @@ void kfree(const void *block)
72819 sp = slob_page(block);
72820 if (is_slob_page(sp)) {
72821 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72822- unsigned int *m = (unsigned int *)(block - align);
72823- slob_free(m, *m + align);
72824- } else
72825+ slob_t *m = (slob_t *)(block - align);
72826+ slob_free(m, m[0].units + align);
72827+ } else {
72828+ clear_slob_page(sp);
72829+ free_slob_page(sp);
72830+ sp->size = 0;
72831 put_page(&sp->page);
72832+ }
72833 }
72834 EXPORT_SYMBOL(kfree);
72835
72836+void check_object_size(const void *ptr, unsigned long n, bool to)
72837+{
72838+
72839+#ifdef CONFIG_PAX_USERCOPY
72840+ struct slob_page *sp;
72841+ const slob_t *free;
72842+ const void *base;
72843+ unsigned long flags;
72844+ const char *type;
72845+
72846+ if (!n)
72847+ return;
72848+
72849+ type = "<null>";
72850+ if (ZERO_OR_NULL_PTR(ptr))
72851+ goto report;
72852+
72853+ if (!virt_addr_valid(ptr))
72854+ return;
72855+
72856+ type = "<process stack>";
72857+ sp = slob_page(ptr);
72858+ if (!PageSlab((struct page *)sp)) {
72859+ if (object_is_on_stack(ptr, n) == -1)
72860+ goto report;
72861+ return;
72862+ }
72863+
72864+ type = "<slob>";
72865+ if (sp->size) {
72866+ base = page_address(&sp->page);
72867+ if (base <= ptr && n <= sp->size - (ptr - base))
72868+ return;
72869+ goto report;
72870+ }
72871+
72872+ /* some tricky double walking to find the chunk */
72873+ spin_lock_irqsave(&slob_lock, flags);
72874+ base = (void *)((unsigned long)ptr & PAGE_MASK);
72875+ free = sp->free;
72876+
72877+ while (!slob_last(free) && (void *)free <= ptr) {
72878+ base = free + slob_units(free);
72879+ free = slob_next(free);
72880+ }
72881+
72882+ while (base < (void *)free) {
72883+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
72884+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
72885+ int offset;
72886+
72887+ if (ptr < base + align)
72888+ break;
72889+
72890+ offset = ptr - base - align;
72891+ if (offset >= m) {
72892+ base += size;
72893+ continue;
72894+ }
72895+
72896+ if (n > m - offset)
72897+ break;
72898+
72899+ spin_unlock_irqrestore(&slob_lock, flags);
72900+ return;
72901+ }
72902+
72903+ spin_unlock_irqrestore(&slob_lock, flags);
72904+report:
72905+ pax_report_usercopy(ptr, n, to, type);
72906+#endif
72907+
72908+}
72909+EXPORT_SYMBOL(check_object_size);
72910+
72911 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
72912 size_t ksize(const void *block)
72913 {
72914@@ -552,10 +645,10 @@ size_t ksize(const void *block)
72915 sp = slob_page(block);
72916 if (is_slob_page(sp)) {
72917 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72918- unsigned int *m = (unsigned int *)(block - align);
72919- return SLOB_UNITS(*m) * SLOB_UNIT;
72920+ slob_t *m = (slob_t *)(block - align);
72921+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
72922 } else
72923- return sp->page.private;
72924+ return sp->size;
72925 }
72926 EXPORT_SYMBOL(ksize);
72927
72928@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
72929 {
72930 struct kmem_cache *c;
72931
72932+#ifdef CONFIG_PAX_USERCOPY
72933+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
72934+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
72935+#else
72936 c = slob_alloc(sizeof(struct kmem_cache),
72937 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
72938+#endif
72939
72940 if (c) {
72941 c->name = name;
72942@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
72943
72944 lockdep_trace_alloc(flags);
72945
72946+#ifdef CONFIG_PAX_USERCOPY
72947+ b = __kmalloc_node_align(c->size, flags, node, c->align);
72948+#else
72949 if (c->size < PAGE_SIZE) {
72950 b = slob_alloc(c->size, flags, c->align, node);
72951 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
72952 SLOB_UNITS(c->size) * SLOB_UNIT,
72953 flags, node);
72954 } else {
72955+ struct slob_page *sp;
72956+
72957 b = slob_new_pages(flags, get_order(c->size), node);
72958+ sp = slob_page(b);
72959+ sp->size = c->size;
72960 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
72961 PAGE_SIZE << get_order(c->size),
72962 flags, node);
72963 }
72964+#endif
72965
72966 if (c->ctor)
72967 c->ctor(b);
72968@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
72969
72970 static void __kmem_cache_free(void *b, int size)
72971 {
72972- if (size < PAGE_SIZE)
72973+ struct slob_page *sp = slob_page(b);
72974+
72975+ if (is_slob_page(sp))
72976 slob_free(b, size);
72977- else
72978+ else {
72979+ clear_slob_page(sp);
72980+ free_slob_page(sp);
72981+ sp->size = 0;
72982 slob_free_pages(b, get_order(size));
72983+ }
72984 }
72985
72986 static void kmem_rcu_free(struct rcu_head *head)
72987@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
72988
72989 void kmem_cache_free(struct kmem_cache *c, void *b)
72990 {
72991+ int size = c->size;
72992+
72993+#ifdef CONFIG_PAX_USERCOPY
72994+ if (size + c->align < PAGE_SIZE) {
72995+ size += c->align;
72996+ b -= c->align;
72997+ }
72998+#endif
72999+
73000 kmemleak_free_recursive(b, c->flags);
73001 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
73002 struct slob_rcu *slob_rcu;
73003- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
73004- slob_rcu->size = c->size;
73005+ slob_rcu = b + (size - sizeof(struct slob_rcu));
73006+ slob_rcu->size = size;
73007 call_rcu(&slob_rcu->head, kmem_rcu_free);
73008 } else {
73009- __kmem_cache_free(b, c->size);
73010+ __kmem_cache_free(b, size);
73011 }
73012
73013+#ifdef CONFIG_PAX_USERCOPY
73014+ trace_kfree(_RET_IP_, b);
73015+#else
73016 trace_kmem_cache_free(_RET_IP_, b);
73017+#endif
73018+
73019 }
73020 EXPORT_SYMBOL(kmem_cache_free);
73021
73022diff --git a/mm/slub.c b/mm/slub.c
73023index 71de9b5..dd263c5 100644
73024--- a/mm/slub.c
73025+++ b/mm/slub.c
73026@@ -209,7 +209,7 @@ struct track {
73027
73028 enum track_item { TRACK_ALLOC, TRACK_FREE };
73029
73030-#ifdef CONFIG_SYSFS
73031+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73032 static int sysfs_slab_add(struct kmem_cache *);
73033 static int sysfs_slab_alias(struct kmem_cache *, const char *);
73034 static void sysfs_slab_remove(struct kmem_cache *);
73035@@ -538,7 +538,7 @@ static void print_track(const char *s, struct track *t)
73036 if (!t->addr)
73037 return;
73038
73039- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
73040+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
73041 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
73042 #ifdef CONFIG_STACKTRACE
73043 {
73044@@ -2603,6 +2603,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
73045
73046 page = virt_to_head_page(x);
73047
73048+ BUG_ON(!PageSlab(page));
73049+
73050 slab_free(s, page, x, _RET_IP_);
73051
73052 trace_kmem_cache_free(_RET_IP_, x);
73053@@ -2636,7 +2638,7 @@ static int slub_min_objects;
73054 * Merge control. If this is set then no merging of slab caches will occur.
73055 * (Could be removed. This was introduced to pacify the merge skeptics.)
73056 */
73057-static int slub_nomerge;
73058+static int slub_nomerge = 1;
73059
73060 /*
73061 * Calculate the order of allocation given an slab object size.
73062@@ -3089,7 +3091,7 @@ static int kmem_cache_open(struct kmem_cache *s,
73063 else
73064 s->cpu_partial = 30;
73065
73066- s->refcount = 1;
73067+ atomic_set(&s->refcount, 1);
73068 #ifdef CONFIG_NUMA
73069 s->remote_node_defrag_ratio = 1000;
73070 #endif
73071@@ -3193,8 +3195,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
73072 void kmem_cache_destroy(struct kmem_cache *s)
73073 {
73074 down_write(&slub_lock);
73075- s->refcount--;
73076- if (!s->refcount) {
73077+ if (atomic_dec_and_test(&s->refcount)) {
73078 list_del(&s->list);
73079 up_write(&slub_lock);
73080 if (kmem_cache_close(s)) {
73081@@ -3405,6 +3406,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
73082 EXPORT_SYMBOL(__kmalloc_node);
73083 #endif
73084
73085+void check_object_size(const void *ptr, unsigned long n, bool to)
73086+{
73087+
73088+#ifdef CONFIG_PAX_USERCOPY
73089+ struct page *page;
73090+ struct kmem_cache *s = NULL;
73091+ unsigned long offset;
73092+ const char *type;
73093+
73094+ if (!n)
73095+ return;
73096+
73097+ type = "<null>";
73098+ if (ZERO_OR_NULL_PTR(ptr))
73099+ goto report;
73100+
73101+ if (!virt_addr_valid(ptr))
73102+ return;
73103+
73104+ page = virt_to_head_page(ptr);
73105+
73106+ type = "<process stack>";
73107+ if (!PageSlab(page)) {
73108+ if (object_is_on_stack(ptr, n) == -1)
73109+ goto report;
73110+ return;
73111+ }
73112+
73113+ s = page->slab;
73114+ type = s->name;
73115+ if (!(s->flags & SLAB_USERCOPY))
73116+ goto report;
73117+
73118+ offset = (ptr - page_address(page)) % s->size;
73119+ if (offset <= s->objsize && n <= s->objsize - offset)
73120+ return;
73121+
73122+report:
73123+ pax_report_usercopy(ptr, n, to, type);
73124+#endif
73125+
73126+}
73127+EXPORT_SYMBOL(check_object_size);
73128+
73129 size_t ksize(const void *object)
73130 {
73131 struct page *page;
73132@@ -3679,7 +3724,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
73133 int node;
73134
73135 list_add(&s->list, &slab_caches);
73136- s->refcount = -1;
73137+ atomic_set(&s->refcount, -1);
73138
73139 for_each_node_state(node, N_NORMAL_MEMORY) {
73140 struct kmem_cache_node *n = get_node(s, node);
73141@@ -3799,17 +3844,17 @@ void __init kmem_cache_init(void)
73142
73143 /* Caches that are not of the two-to-the-power-of size */
73144 if (KMALLOC_MIN_SIZE <= 32) {
73145- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
73146+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
73147 caches++;
73148 }
73149
73150 if (KMALLOC_MIN_SIZE <= 64) {
73151- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
73152+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
73153 caches++;
73154 }
73155
73156 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
73157- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
73158+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
73159 caches++;
73160 }
73161
73162@@ -3877,7 +3922,7 @@ static int slab_unmergeable(struct kmem_cache *s)
73163 /*
73164 * We may have set a slab to be unmergeable during bootstrap.
73165 */
73166- if (s->refcount < 0)
73167+ if (atomic_read(&s->refcount) < 0)
73168 return 1;
73169
73170 return 0;
73171@@ -3936,7 +3981,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73172 down_write(&slub_lock);
73173 s = find_mergeable(size, align, flags, name, ctor);
73174 if (s) {
73175- s->refcount++;
73176+ atomic_inc(&s->refcount);
73177 /*
73178 * Adjust the object sizes so that we clear
73179 * the complete object on kzalloc.
73180@@ -3945,7 +3990,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73181 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
73182
73183 if (sysfs_slab_alias(s, name)) {
73184- s->refcount--;
73185+ atomic_dec(&s->refcount);
73186 goto err;
73187 }
73188 up_write(&slub_lock);
73189@@ -4074,7 +4119,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
73190 }
73191 #endif
73192
73193-#ifdef CONFIG_SYSFS
73194+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73195 static int count_inuse(struct page *page)
73196 {
73197 return page->inuse;
73198@@ -4461,12 +4506,12 @@ static void resiliency_test(void)
73199 validate_slab_cache(kmalloc_caches[9]);
73200 }
73201 #else
73202-#ifdef CONFIG_SYSFS
73203+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73204 static void resiliency_test(void) {};
73205 #endif
73206 #endif
73207
73208-#ifdef CONFIG_SYSFS
73209+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73210 enum slab_stat_type {
73211 SL_ALL, /* All slabs */
73212 SL_PARTIAL, /* Only partially allocated slabs */
73213@@ -4709,7 +4754,7 @@ SLAB_ATTR_RO(ctor);
73214
73215 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
73216 {
73217- return sprintf(buf, "%d\n", s->refcount - 1);
73218+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
73219 }
73220 SLAB_ATTR_RO(aliases);
73221
73222@@ -5280,6 +5325,7 @@ static char *create_unique_id(struct kmem_cache *s)
73223 return name;
73224 }
73225
73226+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73227 static int sysfs_slab_add(struct kmem_cache *s)
73228 {
73229 int err;
73230@@ -5342,6 +5388,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
73231 kobject_del(&s->kobj);
73232 kobject_put(&s->kobj);
73233 }
73234+#endif
73235
73236 /*
73237 * Need to buffer aliases during bootup until sysfs becomes
73238@@ -5355,6 +5402,7 @@ struct saved_alias {
73239
73240 static struct saved_alias *alias_list;
73241
73242+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73243 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73244 {
73245 struct saved_alias *al;
73246@@ -5377,6 +5425,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73247 alias_list = al;
73248 return 0;
73249 }
73250+#endif
73251
73252 static int __init slab_sysfs_init(void)
73253 {
73254diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
73255index 1b7e22a..3fcd4f3 100644
73256--- a/mm/sparse-vmemmap.c
73257+++ b/mm/sparse-vmemmap.c
73258@@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
73259 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
73260 if (!p)
73261 return NULL;
73262- pud_populate(&init_mm, pud, p);
73263+ pud_populate_kernel(&init_mm, pud, p);
73264 }
73265 return pud;
73266 }
73267@@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
73268 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
73269 if (!p)
73270 return NULL;
73271- pgd_populate(&init_mm, pgd, p);
73272+ pgd_populate_kernel(&init_mm, pgd, p);
73273 }
73274 return pgd;
73275 }
73276diff --git a/mm/swap.c b/mm/swap.c
73277index 5c13f13..f1cfc13 100644
73278--- a/mm/swap.c
73279+++ b/mm/swap.c
73280@@ -30,6 +30,7 @@
73281 #include <linux/backing-dev.h>
73282 #include <linux/memcontrol.h>
73283 #include <linux/gfp.h>
73284+#include <linux/hugetlb.h>
73285
73286 #include "internal.h"
73287
73288@@ -70,6 +71,8 @@ static void __put_compound_page(struct page *page)
73289
73290 __page_cache_release(page);
73291 dtor = get_compound_page_dtor(page);
73292+ if (!PageHuge(page))
73293+ BUG_ON(dtor != free_compound_page);
73294 (*dtor)(page);
73295 }
73296
73297diff --git a/mm/swapfile.c b/mm/swapfile.c
73298index fafc26d..1b7493e 100644
73299--- a/mm/swapfile.c
73300+++ b/mm/swapfile.c
73301@@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
73302
73303 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
73304 /* Activity counter to indicate that a swapon or swapoff has occurred */
73305-static atomic_t proc_poll_event = ATOMIC_INIT(0);
73306+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
73307
73308 static inline unsigned char swap_count(unsigned char ent)
73309 {
73310@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
73311 }
73312 filp_close(swap_file, NULL);
73313 err = 0;
73314- atomic_inc(&proc_poll_event);
73315+ atomic_inc_unchecked(&proc_poll_event);
73316 wake_up_interruptible(&proc_poll_wait);
73317
73318 out_dput:
73319@@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
73320
73321 poll_wait(file, &proc_poll_wait, wait);
73322
73323- if (seq->poll_event != atomic_read(&proc_poll_event)) {
73324- seq->poll_event = atomic_read(&proc_poll_event);
73325+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
73326+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73327 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
73328 }
73329
73330@@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inode, struct file *file)
73331 return ret;
73332
73333 seq = file->private_data;
73334- seq->poll_event = atomic_read(&proc_poll_event);
73335+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73336 return 0;
73337 }
73338
73339@@ -2127,7 +2127,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
73340 (p->flags & SWP_DISCARDABLE) ? "D" : "");
73341
73342 mutex_unlock(&swapon_mutex);
73343- atomic_inc(&proc_poll_event);
73344+ atomic_inc_unchecked(&proc_poll_event);
73345 wake_up_interruptible(&proc_poll_wait);
73346
73347 if (S_ISREG(inode->i_mode))
73348diff --git a/mm/util.c b/mm/util.c
73349index ae962b3..0bba886 100644
73350--- a/mm/util.c
73351+++ b/mm/util.c
73352@@ -284,6 +284,12 @@ done:
73353 void arch_pick_mmap_layout(struct mm_struct *mm)
73354 {
73355 mm->mmap_base = TASK_UNMAPPED_BASE;
73356+
73357+#ifdef CONFIG_PAX_RANDMMAP
73358+ if (mm->pax_flags & MF_PAX_RANDMMAP)
73359+ mm->mmap_base += mm->delta_mmap;
73360+#endif
73361+
73362 mm->get_unmapped_area = arch_get_unmapped_area;
73363 mm->unmap_area = arch_unmap_area;
73364 }
73365diff --git a/mm/vmalloc.c b/mm/vmalloc.c
73366index 1196c77..2e608e8 100644
73367--- a/mm/vmalloc.c
73368+++ b/mm/vmalloc.c
73369@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
73370
73371 pte = pte_offset_kernel(pmd, addr);
73372 do {
73373- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
73374- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
73375+
73376+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73377+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
73378+ BUG_ON(!pte_exec(*pte));
73379+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
73380+ continue;
73381+ }
73382+#endif
73383+
73384+ {
73385+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
73386+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
73387+ }
73388 } while (pte++, addr += PAGE_SIZE, addr != end);
73389 }
73390
73391@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
73392 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
73393 {
73394 pte_t *pte;
73395+ int ret = -ENOMEM;
73396
73397 /*
73398 * nr is a running index into the array which helps higher level
73399@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
73400 pte = pte_alloc_kernel(pmd, addr);
73401 if (!pte)
73402 return -ENOMEM;
73403+
73404+ pax_open_kernel();
73405 do {
73406 struct page *page = pages[*nr];
73407
73408- if (WARN_ON(!pte_none(*pte)))
73409- return -EBUSY;
73410- if (WARN_ON(!page))
73411- return -ENOMEM;
73412+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73413+ if (pgprot_val(prot) & _PAGE_NX)
73414+#endif
73415+
73416+ if (WARN_ON(!pte_none(*pte))) {
73417+ ret = -EBUSY;
73418+ goto out;
73419+ }
73420+ if (WARN_ON(!page)) {
73421+ ret = -ENOMEM;
73422+ goto out;
73423+ }
73424 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
73425 (*nr)++;
73426 } while (pte++, addr += PAGE_SIZE, addr != end);
73427- return 0;
73428+ ret = 0;
73429+out:
73430+ pax_close_kernel();
73431+ return ret;
73432 }
73433
73434 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
73435@@ -119,7 +144,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
73436 pmd_t *pmd;
73437 unsigned long next;
73438
73439- pmd = pmd_alloc(&init_mm, pud, addr);
73440+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
73441 if (!pmd)
73442 return -ENOMEM;
73443 do {
73444@@ -136,7 +161,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
73445 pud_t *pud;
73446 unsigned long next;
73447
73448- pud = pud_alloc(&init_mm, pgd, addr);
73449+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
73450 if (!pud)
73451 return -ENOMEM;
73452 do {
73453@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
73454 * and fall back on vmalloc() if that fails. Others
73455 * just put it in the vmalloc space.
73456 */
73457-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
73458+#ifdef CONFIG_MODULES
73459+#ifdef MODULES_VADDR
73460 unsigned long addr = (unsigned long)x;
73461 if (addr >= MODULES_VADDR && addr < MODULES_END)
73462 return 1;
73463 #endif
73464+
73465+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73466+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
73467+ return 1;
73468+#endif
73469+
73470+#endif
73471+
73472 return is_vmalloc_addr(x);
73473 }
73474
73475@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
73476
73477 if (!pgd_none(*pgd)) {
73478 pud_t *pud = pud_offset(pgd, addr);
73479+#ifdef CONFIG_X86
73480+ if (!pud_large(*pud))
73481+#endif
73482 if (!pud_none(*pud)) {
73483 pmd_t *pmd = pmd_offset(pud, addr);
73484+#ifdef CONFIG_X86
73485+ if (!pmd_large(*pmd))
73486+#endif
73487 if (!pmd_none(*pmd)) {
73488 pte_t *ptep, pte;
73489
73490@@ -332,6 +372,10 @@ static void purge_vmap_area_lazy(void);
73491 static struct vmap_area *alloc_vmap_area(unsigned long size,
73492 unsigned long align,
73493 unsigned long vstart, unsigned long vend,
73494+ int node, gfp_t gfp_mask) __size_overflow(1);
73495+static struct vmap_area *alloc_vmap_area(unsigned long size,
73496+ unsigned long align,
73497+ unsigned long vstart, unsigned long vend,
73498 int node, gfp_t gfp_mask)
73499 {
73500 struct vmap_area *va;
73501@@ -1320,6 +1364,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
73502 struct vm_struct *area;
73503
73504 BUG_ON(in_interrupt());
73505+
73506+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73507+ if (flags & VM_KERNEXEC) {
73508+ if (start != VMALLOC_START || end != VMALLOC_END)
73509+ return NULL;
73510+ start = (unsigned long)MODULES_EXEC_VADDR;
73511+ end = (unsigned long)MODULES_EXEC_END;
73512+ }
73513+#endif
73514+
73515 if (flags & VM_IOREMAP) {
73516 int bit = fls(size);
73517
73518@@ -1552,6 +1606,11 @@ void *vmap(struct page **pages, unsigned int count,
73519 if (count > totalram_pages)
73520 return NULL;
73521
73522+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73523+ if (!(pgprot_val(prot) & _PAGE_NX))
73524+ flags |= VM_KERNEXEC;
73525+#endif
73526+
73527 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
73528 __builtin_return_address(0));
73529 if (!area)
73530@@ -1653,6 +1712,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
73531 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
73532 goto fail;
73533
73534+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73535+ if (!(pgprot_val(prot) & _PAGE_NX))
73536+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
73537+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
73538+ else
73539+#endif
73540+
73541 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
73542 start, end, node, gfp_mask, caller);
73543 if (!area)
73544@@ -1826,10 +1892,9 @@ EXPORT_SYMBOL(vzalloc_node);
73545 * For tight control over page level allocator and protection flags
73546 * use __vmalloc() instead.
73547 */
73548-
73549 void *vmalloc_exec(unsigned long size)
73550 {
73551- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
73552+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
73553 -1, __builtin_return_address(0));
73554 }
73555
73556@@ -2124,6 +2189,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
73557 unsigned long uaddr = vma->vm_start;
73558 unsigned long usize = vma->vm_end - vma->vm_start;
73559
73560+ BUG_ON(vma->vm_mirror);
73561+
73562 if ((PAGE_SIZE-1) & (unsigned long)addr)
73563 return -EINVAL;
73564
73565@@ -2376,8 +2443,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
73566 return NULL;
73567 }
73568
73569- vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
73570- vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
73571+ vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
73572+ vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
73573 if (!vas || !vms)
73574 goto err_free2;
73575
73576diff --git a/mm/vmstat.c b/mm/vmstat.c
73577index 7db1b9b..e9f6b07 100644
73578--- a/mm/vmstat.c
73579+++ b/mm/vmstat.c
73580@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
73581 *
73582 * vm_stat contains the global counters
73583 */
73584-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
73585+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
73586 EXPORT_SYMBOL(vm_stat);
73587
73588 #ifdef CONFIG_SMP
73589@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
73590 v = p->vm_stat_diff[i];
73591 p->vm_stat_diff[i] = 0;
73592 local_irq_restore(flags);
73593- atomic_long_add(v, &zone->vm_stat[i]);
73594+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
73595 global_diff[i] += v;
73596 #ifdef CONFIG_NUMA
73597 /* 3 seconds idle till flush */
73598@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
73599
73600 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
73601 if (global_diff[i])
73602- atomic_long_add(global_diff[i], &vm_stat[i]);
73603+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
73604 }
73605
73606 #endif
73607@@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
73608 start_cpu_timer(cpu);
73609 #endif
73610 #ifdef CONFIG_PROC_FS
73611- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
73612- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
73613- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
73614- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
73615+ {
73616+ mode_t gr_mode = S_IRUGO;
73617+#ifdef CONFIG_GRKERNSEC_PROC_ADD
73618+ gr_mode = S_IRUSR;
73619+#endif
73620+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
73621+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
73622+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
73623+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
73624+#else
73625+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
73626+#endif
73627+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
73628+ }
73629 #endif
73630 return 0;
73631 }
73632diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
73633index efea35b..9c8dd0b 100644
73634--- a/net/8021q/vlan.c
73635+++ b/net/8021q/vlan.c
73636@@ -554,8 +554,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
73637 err = -EPERM;
73638 if (!capable(CAP_NET_ADMIN))
73639 break;
73640- if ((args.u.name_type >= 0) &&
73641- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
73642+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
73643 struct vlan_net *vn;
73644
73645 vn = net_generic(net, vlan_net_id);
73646diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
73647index fccae26..e7ece2f 100644
73648--- a/net/9p/trans_fd.c
73649+++ b/net/9p/trans_fd.c
73650@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
73651 oldfs = get_fs();
73652 set_fs(get_ds());
73653 /* The cast to a user pointer is valid due to the set_fs() */
73654- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
73655+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
73656 set_fs(oldfs);
73657
73658 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
73659diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
73660index 876fbe8..8bbea9f 100644
73661--- a/net/atm/atm_misc.c
73662+++ b/net/atm/atm_misc.c
73663@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
73664 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
73665 return 1;
73666 atm_return(vcc, truesize);
73667- atomic_inc(&vcc->stats->rx_drop);
73668+ atomic_inc_unchecked(&vcc->stats->rx_drop);
73669 return 0;
73670 }
73671 EXPORT_SYMBOL(atm_charge);
73672@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
73673 }
73674 }
73675 atm_return(vcc, guess);
73676- atomic_inc(&vcc->stats->rx_drop);
73677+ atomic_inc_unchecked(&vcc->stats->rx_drop);
73678 return NULL;
73679 }
73680 EXPORT_SYMBOL(atm_alloc_charge);
73681@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
73682
73683 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
73684 {
73685-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
73686+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
73687 __SONET_ITEMS
73688 #undef __HANDLE_ITEM
73689 }
73690@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
73691
73692 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
73693 {
73694-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
73695+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
73696 __SONET_ITEMS
73697 #undef __HANDLE_ITEM
73698 }
73699diff --git a/net/atm/lec.h b/net/atm/lec.h
73700index dfc0719..47c5322 100644
73701--- a/net/atm/lec.h
73702+++ b/net/atm/lec.h
73703@@ -48,7 +48,7 @@ struct lane2_ops {
73704 const u8 *tlvs, u32 sizeoftlvs);
73705 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
73706 const u8 *tlvs, u32 sizeoftlvs);
73707-};
73708+} __no_const;
73709
73710 /*
73711 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
73712diff --git a/net/atm/mpc.h b/net/atm/mpc.h
73713index 0919a88..a23d54e 100644
73714--- a/net/atm/mpc.h
73715+++ b/net/atm/mpc.h
73716@@ -33,7 +33,7 @@ struct mpoa_client {
73717 struct mpc_parameters parameters; /* parameters for this client */
73718
73719 const struct net_device_ops *old_ops;
73720- struct net_device_ops new_ops;
73721+ net_device_ops_no_const new_ops;
73722 };
73723
73724
73725diff --git a/net/atm/proc.c b/net/atm/proc.c
73726index 0d020de..011c7bb 100644
73727--- a/net/atm/proc.c
73728+++ b/net/atm/proc.c
73729@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
73730 const struct k_atm_aal_stats *stats)
73731 {
73732 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
73733- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
73734- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
73735- atomic_read(&stats->rx_drop));
73736+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
73737+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
73738+ atomic_read_unchecked(&stats->rx_drop));
73739 }
73740
73741 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
73742diff --git a/net/atm/resources.c b/net/atm/resources.c
73743index 23f45ce..c748f1a 100644
73744--- a/net/atm/resources.c
73745+++ b/net/atm/resources.c
73746@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
73747 static void copy_aal_stats(struct k_atm_aal_stats *from,
73748 struct atm_aal_stats *to)
73749 {
73750-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
73751+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
73752 __AAL_STAT_ITEMS
73753 #undef __HANDLE_ITEM
73754 }
73755@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
73756 static void subtract_aal_stats(struct k_atm_aal_stats *from,
73757 struct atm_aal_stats *to)
73758 {
73759-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
73760+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
73761 __AAL_STAT_ITEMS
73762 #undef __HANDLE_ITEM
73763 }
73764diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
73765index a6d5d63..1cc6c2b 100644
73766--- a/net/batman-adv/bat_iv_ogm.c
73767+++ b/net/batman-adv/bat_iv_ogm.c
73768@@ -539,7 +539,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
73769
73770 /* change sequence number to network order */
73771 batman_ogm_packet->seqno =
73772- htonl((uint32_t)atomic_read(&hard_iface->seqno));
73773+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
73774
73775 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
73776 batman_ogm_packet->tt_crc = htons((uint16_t)
73777@@ -559,7 +559,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
73778 else
73779 batman_ogm_packet->gw_flags = NO_FLAGS;
73780
73781- atomic_inc(&hard_iface->seqno);
73782+ atomic_inc_unchecked(&hard_iface->seqno);
73783
73784 slide_own_bcast_window(hard_iface);
73785 bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
73786@@ -917,7 +917,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
73787 return;
73788
73789 /* could be changed by schedule_own_packet() */
73790- if_incoming_seqno = atomic_read(&if_incoming->seqno);
73791+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
73792
73793 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
73794
73795diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
73796index 3778977..f6a9450 100644
73797--- a/net/batman-adv/hard-interface.c
73798+++ b/net/batman-adv/hard-interface.c
73799@@ -328,8 +328,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
73800 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
73801 dev_add_pack(&hard_iface->batman_adv_ptype);
73802
73803- atomic_set(&hard_iface->seqno, 1);
73804- atomic_set(&hard_iface->frag_seqno, 1);
73805+ atomic_set_unchecked(&hard_iface->seqno, 1);
73806+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
73807 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
73808 hard_iface->net_dev->name);
73809
73810diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
73811index a5590f4..8d31969 100644
73812--- a/net/batman-adv/soft-interface.c
73813+++ b/net/batman-adv/soft-interface.c
73814@@ -645,7 +645,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
73815
73816 /* set broadcast sequence number */
73817 bcast_packet->seqno =
73818- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
73819+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
73820
73821 add_bcast_packet_to_list(bat_priv, skb, 1);
73822
73823@@ -841,7 +841,7 @@ struct net_device *softif_create(const char *name)
73824 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
73825
73826 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
73827- atomic_set(&bat_priv->bcast_seqno, 1);
73828+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
73829 atomic_set(&bat_priv->ttvn, 0);
73830 atomic_set(&bat_priv->tt_local_changes, 0);
73831 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
73832diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
73833index 302efb5..1590365 100644
73834--- a/net/batman-adv/types.h
73835+++ b/net/batman-adv/types.h
73836@@ -38,8 +38,8 @@ struct hard_iface {
73837 int16_t if_num;
73838 char if_status;
73839 struct net_device *net_dev;
73840- atomic_t seqno;
73841- atomic_t frag_seqno;
73842+ atomic_unchecked_t seqno;
73843+ atomic_unchecked_t frag_seqno;
73844 unsigned char *packet_buff;
73845 int packet_len;
73846 struct kobject *hardif_obj;
73847@@ -155,7 +155,7 @@ struct bat_priv {
73848 atomic_t orig_interval; /* uint */
73849 atomic_t hop_penalty; /* uint */
73850 atomic_t log_level; /* uint */
73851- atomic_t bcast_seqno;
73852+ atomic_unchecked_t bcast_seqno;
73853 atomic_t bcast_queue_left;
73854 atomic_t batman_queue_left;
73855 atomic_t ttvn; /* translation table version number */
73856diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
73857index 676f6a6..3b4e668 100644
73858--- a/net/batman-adv/unicast.c
73859+++ b/net/batman-adv/unicast.c
73860@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
73861 frag1->flags = UNI_FRAG_HEAD | large_tail;
73862 frag2->flags = large_tail;
73863
73864- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
73865+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
73866 frag1->seqno = htons(seqno - 1);
73867 frag2->seqno = htons(seqno);
73868
73869diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
73870index 5238b6b..c9798ce 100644
73871--- a/net/bluetooth/hci_conn.c
73872+++ b/net/bluetooth/hci_conn.c
73873@@ -233,7 +233,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
73874 memset(&cp, 0, sizeof(cp));
73875
73876 cp.handle = cpu_to_le16(conn->handle);
73877- memcpy(cp.ltk, ltk, sizeof(ltk));
73878+ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
73879
73880 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
73881 }
73882diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
73883index 6f9c25b..d19fd66 100644
73884--- a/net/bluetooth/l2cap_core.c
73885+++ b/net/bluetooth/l2cap_core.c
73886@@ -2466,8 +2466,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
73887 break;
73888
73889 case L2CAP_CONF_RFC:
73890- if (olen == sizeof(rfc))
73891- memcpy(&rfc, (void *)val, olen);
73892+ if (olen != sizeof(rfc))
73893+ break;
73894+
73895+ memcpy(&rfc, (void *)val, olen);
73896
73897 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
73898 rfc.mode != chan->mode)
73899@@ -2585,8 +2587,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
73900
73901 switch (type) {
73902 case L2CAP_CONF_RFC:
73903- if (olen == sizeof(rfc))
73904- memcpy(&rfc, (void *)val, olen);
73905+ if (olen != sizeof(rfc))
73906+ break;
73907+
73908+ memcpy(&rfc, (void *)val, olen);
73909 goto done;
73910 }
73911 }
73912diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
73913index 5fe2ff3..10968b5 100644
73914--- a/net/bridge/netfilter/ebtables.c
73915+++ b/net/bridge/netfilter/ebtables.c
73916@@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
73917 tmp.valid_hooks = t->table->valid_hooks;
73918 }
73919 mutex_unlock(&ebt_mutex);
73920- if (copy_to_user(user, &tmp, *len) != 0){
73921+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
73922 BUGPRINT("c2u Didn't work\n");
73923 ret = -EFAULT;
73924 break;
73925diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
73926index 5cf5222..6f704ad 100644
73927--- a/net/caif/cfctrl.c
73928+++ b/net/caif/cfctrl.c
73929@@ -9,6 +9,7 @@
73930 #include <linux/stddef.h>
73931 #include <linux/spinlock.h>
73932 #include <linux/slab.h>
73933+#include <linux/sched.h>
73934 #include <net/caif/caif_layer.h>
73935 #include <net/caif/cfpkt.h>
73936 #include <net/caif/cfctrl.h>
73937@@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
73938 memset(&dev_info, 0, sizeof(dev_info));
73939 dev_info.id = 0xff;
73940 cfsrvl_init(&this->serv, 0, &dev_info, false);
73941- atomic_set(&this->req_seq_no, 1);
73942- atomic_set(&this->rsp_seq_no, 1);
73943+ atomic_set_unchecked(&this->req_seq_no, 1);
73944+ atomic_set_unchecked(&this->rsp_seq_no, 1);
73945 this->serv.layer.receive = cfctrl_recv;
73946 sprintf(this->serv.layer.name, "ctrl");
73947 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
73948@@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
73949 struct cfctrl_request_info *req)
73950 {
73951 spin_lock_bh(&ctrl->info_list_lock);
73952- atomic_inc(&ctrl->req_seq_no);
73953- req->sequence_no = atomic_read(&ctrl->req_seq_no);
73954+ atomic_inc_unchecked(&ctrl->req_seq_no);
73955+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
73956 list_add_tail(&req->list, &ctrl->list);
73957 spin_unlock_bh(&ctrl->info_list_lock);
73958 }
73959@@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
73960 if (p != first)
73961 pr_warn("Requests are not received in order\n");
73962
73963- atomic_set(&ctrl->rsp_seq_no,
73964+ atomic_set_unchecked(&ctrl->rsp_seq_no,
73965 p->sequence_no);
73966 list_del(&p->list);
73967 goto out;
73968diff --git a/net/can/gw.c b/net/can/gw.c
73969index 3d79b12..8de85fa 100644
73970--- a/net/can/gw.c
73971+++ b/net/can/gw.c
73972@@ -96,7 +96,7 @@ struct cf_mod {
73973 struct {
73974 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
73975 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
73976- } csumfunc;
73977+ } __no_const csumfunc;
73978 };
73979
73980
73981diff --git a/net/compat.c b/net/compat.c
73982index e055708..3f80795 100644
73983--- a/net/compat.c
73984+++ b/net/compat.c
73985@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
73986 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
73987 __get_user(kmsg->msg_flags, &umsg->msg_flags))
73988 return -EFAULT;
73989- kmsg->msg_name = compat_ptr(tmp1);
73990- kmsg->msg_iov = compat_ptr(tmp2);
73991- kmsg->msg_control = compat_ptr(tmp3);
73992+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
73993+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
73994+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
73995 return 0;
73996 }
73997
73998@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
73999
74000 if (kern_msg->msg_namelen) {
74001 if (mode == VERIFY_READ) {
74002- int err = move_addr_to_kernel(kern_msg->msg_name,
74003+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
74004 kern_msg->msg_namelen,
74005 kern_address);
74006 if (err < 0)
74007@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74008 kern_msg->msg_name = NULL;
74009
74010 tot_len = iov_from_user_compat_to_kern(kern_iov,
74011- (struct compat_iovec __user *)kern_msg->msg_iov,
74012+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
74013 kern_msg->msg_iovlen);
74014 if (tot_len >= 0)
74015 kern_msg->msg_iov = kern_iov;
74016@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74017
74018 #define CMSG_COMPAT_FIRSTHDR(msg) \
74019 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
74020- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
74021+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
74022 (struct compat_cmsghdr __user *)NULL)
74023
74024 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
74025 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
74026 (ucmlen) <= (unsigned long) \
74027 ((mhdr)->msg_controllen - \
74028- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
74029+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
74030
74031 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
74032 struct compat_cmsghdr __user *cmsg, int cmsg_len)
74033 {
74034 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
74035- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
74036+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
74037 msg->msg_controllen)
74038 return NULL;
74039 return (struct compat_cmsghdr __user *)ptr;
74040@@ -219,7 +219,7 @@ Efault:
74041
74042 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
74043 {
74044- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74045+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74046 struct compat_cmsghdr cmhdr;
74047 int cmlen;
74048
74049@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
74050
74051 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
74052 {
74053- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74054+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74055 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
74056 int fdnum = scm->fp->count;
74057 struct file **fp = scm->fp->fp;
74058@@ -372,7 +372,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
74059 return -EFAULT;
74060 old_fs = get_fs();
74061 set_fs(KERNEL_DS);
74062- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
74063+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
74064 set_fs(old_fs);
74065
74066 return err;
74067@@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
74068 len = sizeof(ktime);
74069 old_fs = get_fs();
74070 set_fs(KERNEL_DS);
74071- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
74072+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
74073 set_fs(old_fs);
74074
74075 if (!err) {
74076@@ -576,7 +576,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74077 case MCAST_JOIN_GROUP:
74078 case MCAST_LEAVE_GROUP:
74079 {
74080- struct compat_group_req __user *gr32 = (void *)optval;
74081+ struct compat_group_req __user *gr32 = (void __user *)optval;
74082 struct group_req __user *kgr =
74083 compat_alloc_user_space(sizeof(struct group_req));
74084 u32 interface;
74085@@ -597,7 +597,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74086 case MCAST_BLOCK_SOURCE:
74087 case MCAST_UNBLOCK_SOURCE:
74088 {
74089- struct compat_group_source_req __user *gsr32 = (void *)optval;
74090+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
74091 struct group_source_req __user *kgsr = compat_alloc_user_space(
74092 sizeof(struct group_source_req));
74093 u32 interface;
74094@@ -618,7 +618,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74095 }
74096 case MCAST_MSFILTER:
74097 {
74098- struct compat_group_filter __user *gf32 = (void *)optval;
74099+ struct compat_group_filter __user *gf32 = (void __user *)optval;
74100 struct group_filter __user *kgf;
74101 u32 interface, fmode, numsrc;
74102
74103@@ -656,7 +656,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
74104 char __user *optval, int __user *optlen,
74105 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
74106 {
74107- struct compat_group_filter __user *gf32 = (void *)optval;
74108+ struct compat_group_filter __user *gf32 = (void __user *)optval;
74109 struct group_filter __user *kgf;
74110 int __user *koptlen;
74111 u32 interface, fmode, numsrc;
74112diff --git a/net/core/datagram.c b/net/core/datagram.c
74113index e4fbfd6..6a6ac94 100644
74114--- a/net/core/datagram.c
74115+++ b/net/core/datagram.c
74116@@ -290,7 +290,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
74117 }
74118
74119 kfree_skb(skb);
74120- atomic_inc(&sk->sk_drops);
74121+ atomic_inc_unchecked(&sk->sk_drops);
74122 sk_mem_reclaim_partial(sk);
74123
74124 return err;
74125diff --git a/net/core/dev.c b/net/core/dev.c
74126index 99e1d75..adf968a 100644
74127--- a/net/core/dev.c
74128+++ b/net/core/dev.c
74129@@ -1136,9 +1136,13 @@ void dev_load(struct net *net, const char *name)
74130 if (no_module && capable(CAP_NET_ADMIN))
74131 no_module = request_module("netdev-%s", name);
74132 if (no_module && capable(CAP_SYS_MODULE)) {
74133+#ifdef CONFIG_GRKERNSEC_MODHARDEN
74134+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
74135+#else
74136 if (!request_module("%s", name))
74137 pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
74138 name);
74139+#endif
74140 }
74141 }
74142 EXPORT_SYMBOL(dev_load);
74143@@ -1602,7 +1606,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74144 {
74145 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
74146 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
74147- atomic_long_inc(&dev->rx_dropped);
74148+ atomic_long_inc_unchecked(&dev->rx_dropped);
74149 kfree_skb(skb);
74150 return NET_RX_DROP;
74151 }
74152@@ -1612,7 +1616,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74153 nf_reset(skb);
74154
74155 if (unlikely(!is_skb_forwardable(dev, skb))) {
74156- atomic_long_inc(&dev->rx_dropped);
74157+ atomic_long_inc_unchecked(&dev->rx_dropped);
74158 kfree_skb(skb);
74159 return NET_RX_DROP;
74160 }
74161@@ -2042,7 +2046,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
74162
74163 struct dev_gso_cb {
74164 void (*destructor)(struct sk_buff *skb);
74165-};
74166+} __no_const;
74167
74168 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
74169
74170@@ -2898,7 +2902,7 @@ enqueue:
74171
74172 local_irq_restore(flags);
74173
74174- atomic_long_inc(&skb->dev->rx_dropped);
74175+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
74176 kfree_skb(skb);
74177 return NET_RX_DROP;
74178 }
74179@@ -2970,7 +2974,7 @@ int netif_rx_ni(struct sk_buff *skb)
74180 }
74181 EXPORT_SYMBOL(netif_rx_ni);
74182
74183-static void net_tx_action(struct softirq_action *h)
74184+static void net_tx_action(void)
74185 {
74186 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74187
74188@@ -3258,7 +3262,7 @@ ncls:
74189 if (pt_prev) {
74190 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
74191 } else {
74192- atomic_long_inc(&skb->dev->rx_dropped);
74193+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
74194 kfree_skb(skb);
74195 /* Jamal, now you will not able to escape explaining
74196 * me how you were going to use this. :-)
74197@@ -3818,7 +3822,7 @@ void netif_napi_del(struct napi_struct *napi)
74198 }
74199 EXPORT_SYMBOL(netif_napi_del);
74200
74201-static void net_rx_action(struct softirq_action *h)
74202+static void net_rx_action(void)
74203 {
74204 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74205 unsigned long time_limit = jiffies + 2;
74206@@ -4288,8 +4292,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
74207 else
74208 seq_printf(seq, "%04x", ntohs(pt->type));
74209
74210+#ifdef CONFIG_GRKERNSEC_HIDESYM
74211+ seq_printf(seq, " %-8s %p\n",
74212+ pt->dev ? pt->dev->name : "", NULL);
74213+#else
74214 seq_printf(seq, " %-8s %pF\n",
74215 pt->dev ? pt->dev->name : "", pt->func);
74216+#endif
74217 }
74218
74219 return 0;
74220@@ -5839,7 +5848,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
74221 } else {
74222 netdev_stats_to_stats64(storage, &dev->stats);
74223 }
74224- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
74225+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
74226 return storage;
74227 }
74228 EXPORT_SYMBOL(dev_get_stats);
74229diff --git a/net/core/flow.c b/net/core/flow.c
74230index e318c7e..168b1d0 100644
74231--- a/net/core/flow.c
74232+++ b/net/core/flow.c
74233@@ -61,7 +61,7 @@ struct flow_cache {
74234 struct timer_list rnd_timer;
74235 };
74236
74237-atomic_t flow_cache_genid = ATOMIC_INIT(0);
74238+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
74239 EXPORT_SYMBOL(flow_cache_genid);
74240 static struct flow_cache flow_cache_global;
74241 static struct kmem_cache *flow_cachep __read_mostly;
74242@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
74243
74244 static int flow_entry_valid(struct flow_cache_entry *fle)
74245 {
74246- if (atomic_read(&flow_cache_genid) != fle->genid)
74247+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
74248 return 0;
74249 if (fle->object && !fle->object->ops->check(fle->object))
74250 return 0;
74251@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
74252 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
74253 fcp->hash_count++;
74254 }
74255- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
74256+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
74257 flo = fle->object;
74258 if (!flo)
74259 goto ret_object;
74260@@ -280,7 +280,7 @@ nocache:
74261 }
74262 flo = resolver(net, key, family, dir, flo, ctx);
74263 if (fle) {
74264- fle->genid = atomic_read(&flow_cache_genid);
74265+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
74266 if (!IS_ERR(flo))
74267 fle->object = flo;
74268 else
74269diff --git a/net/core/iovec.c b/net/core/iovec.c
74270index 7e7aeb0..2a998cb 100644
74271--- a/net/core/iovec.c
74272+++ b/net/core/iovec.c
74273@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
74274 if (m->msg_namelen) {
74275 if (mode == VERIFY_READ) {
74276 void __user *namep;
74277- namep = (void __user __force *) m->msg_name;
74278+ namep = (void __force_user *) m->msg_name;
74279 err = move_addr_to_kernel(namep, m->msg_namelen,
74280 address);
74281 if (err < 0)
74282@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
74283 }
74284
74285 size = m->msg_iovlen * sizeof(struct iovec);
74286- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
74287+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
74288 return -EFAULT;
74289
74290 m->msg_iov = iov;
74291diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
74292index 90430b7..0032ec0 100644
74293--- a/net/core/rtnetlink.c
74294+++ b/net/core/rtnetlink.c
74295@@ -56,7 +56,7 @@ struct rtnl_link {
74296 rtnl_doit_func doit;
74297 rtnl_dumpit_func dumpit;
74298 rtnl_calcit_func calcit;
74299-};
74300+} __no_const;
74301
74302 static DEFINE_MUTEX(rtnl_mutex);
74303
74304diff --git a/net/core/scm.c b/net/core/scm.c
74305index 611c5ef..88f6d6d 100644
74306--- a/net/core/scm.c
74307+++ b/net/core/scm.c
74308@@ -219,7 +219,7 @@ EXPORT_SYMBOL(__scm_send);
74309 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
74310 {
74311 struct cmsghdr __user *cm
74312- = (__force struct cmsghdr __user *)msg->msg_control;
74313+ = (struct cmsghdr __force_user *)msg->msg_control;
74314 struct cmsghdr cmhdr;
74315 int cmlen = CMSG_LEN(len);
74316 int err;
74317@@ -242,7 +242,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
74318 err = -EFAULT;
74319 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
74320 goto out;
74321- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
74322+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
74323 goto out;
74324 cmlen = CMSG_SPACE(len);
74325 if (msg->msg_controllen < cmlen)
74326@@ -258,7 +258,7 @@ EXPORT_SYMBOL(put_cmsg);
74327 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
74328 {
74329 struct cmsghdr __user *cm
74330- = (__force struct cmsghdr __user*)msg->msg_control;
74331+ = (struct cmsghdr __force_user *)msg->msg_control;
74332
74333 int fdmax = 0;
74334 int fdnum = scm->fp->count;
74335@@ -278,7 +278,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
74336 if (fdnum < fdmax)
74337 fdmax = fdnum;
74338
74339- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
74340+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
74341 i++, cmfptr++)
74342 {
74343 int new_fd;
74344diff --git a/net/core/sock.c b/net/core/sock.c
74345index b2e14c0..6651b32 100644
74346--- a/net/core/sock.c
74347+++ b/net/core/sock.c
74348@@ -340,7 +340,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74349 struct sk_buff_head *list = &sk->sk_receive_queue;
74350
74351 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
74352- atomic_inc(&sk->sk_drops);
74353+ atomic_inc_unchecked(&sk->sk_drops);
74354 trace_sock_rcvqueue_full(sk, skb);
74355 return -ENOMEM;
74356 }
74357@@ -350,7 +350,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74358 return err;
74359
74360 if (!sk_rmem_schedule(sk, skb->truesize)) {
74361- atomic_inc(&sk->sk_drops);
74362+ atomic_inc_unchecked(&sk->sk_drops);
74363 return -ENOBUFS;
74364 }
74365
74366@@ -370,7 +370,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74367 skb_dst_force(skb);
74368
74369 spin_lock_irqsave(&list->lock, flags);
74370- skb->dropcount = atomic_read(&sk->sk_drops);
74371+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
74372 __skb_queue_tail(list, skb);
74373 spin_unlock_irqrestore(&list->lock, flags);
74374
74375@@ -390,7 +390,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
74376 skb->dev = NULL;
74377
74378 if (sk_rcvqueues_full(sk, skb)) {
74379- atomic_inc(&sk->sk_drops);
74380+ atomic_inc_unchecked(&sk->sk_drops);
74381 goto discard_and_relse;
74382 }
74383 if (nested)
74384@@ -408,7 +408,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
74385 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
74386 } else if (sk_add_backlog(sk, skb)) {
74387 bh_unlock_sock(sk);
74388- atomic_inc(&sk->sk_drops);
74389+ atomic_inc_unchecked(&sk->sk_drops);
74390 goto discard_and_relse;
74391 }
74392
74393@@ -984,7 +984,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74394 if (len > sizeof(peercred))
74395 len = sizeof(peercred);
74396 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
74397- if (copy_to_user(optval, &peercred, len))
74398+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
74399 return -EFAULT;
74400 goto lenout;
74401 }
74402@@ -997,7 +997,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74403 return -ENOTCONN;
74404 if (lv < len)
74405 return -EINVAL;
74406- if (copy_to_user(optval, address, len))
74407+ if (len > sizeof(address) || copy_to_user(optval, address, len))
74408 return -EFAULT;
74409 goto lenout;
74410 }
74411@@ -1043,7 +1043,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74412
74413 if (len > lv)
74414 len = lv;
74415- if (copy_to_user(optval, &v, len))
74416+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
74417 return -EFAULT;
74418 lenout:
74419 if (put_user(len, optlen))
74420@@ -2128,7 +2128,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
74421 */
74422 smp_wmb();
74423 atomic_set(&sk->sk_refcnt, 1);
74424- atomic_set(&sk->sk_drops, 0);
74425+ atomic_set_unchecked(&sk->sk_drops, 0);
74426 }
74427 EXPORT_SYMBOL(sock_init_data);
74428
74429diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
74430index b9868e1..849f809 100644
74431--- a/net/core/sock_diag.c
74432+++ b/net/core/sock_diag.c
74433@@ -16,20 +16,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
74434
74435 int sock_diag_check_cookie(void *sk, __u32 *cookie)
74436 {
74437+#ifndef CONFIG_GRKERNSEC_HIDESYM
74438 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
74439 cookie[1] != INET_DIAG_NOCOOKIE) &&
74440 ((u32)(unsigned long)sk != cookie[0] ||
74441 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
74442 return -ESTALE;
74443 else
74444+#endif
74445 return 0;
74446 }
74447 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
74448
74449 void sock_diag_save_cookie(void *sk, __u32 *cookie)
74450 {
74451+#ifdef CONFIG_GRKERNSEC_HIDESYM
74452+ cookie[0] = 0;
74453+ cookie[1] = 0;
74454+#else
74455 cookie[0] = (u32)(unsigned long)sk;
74456 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
74457+#endif
74458 }
74459 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
74460
74461diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
74462index 02e75d1..9a57a7c 100644
74463--- a/net/decnet/sysctl_net_decnet.c
74464+++ b/net/decnet/sysctl_net_decnet.c
74465@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
74466
74467 if (len > *lenp) len = *lenp;
74468
74469- if (copy_to_user(buffer, addr, len))
74470+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
74471 return -EFAULT;
74472
74473 *lenp = len;
74474@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
74475
74476 if (len > *lenp) len = *lenp;
74477
74478- if (copy_to_user(buffer, devname, len))
74479+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
74480 return -EFAULT;
74481
74482 *lenp = len;
74483diff --git a/net/econet/Kconfig b/net/econet/Kconfig
74484index 39a2d29..f39c0fe 100644
74485--- a/net/econet/Kconfig
74486+++ b/net/econet/Kconfig
74487@@ -4,7 +4,7 @@
74488
74489 config ECONET
74490 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
74491- depends on EXPERIMENTAL && INET
74492+ depends on EXPERIMENTAL && INET && BROKEN
74493 ---help---
74494 Econet is a fairly old and slow networking protocol mainly used by
74495 Acorn computers to access file and print servers. It uses native
74496diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
74497index cbe3a68..a879b75 100644
74498--- a/net/ipv4/fib_frontend.c
74499+++ b/net/ipv4/fib_frontend.c
74500@@ -969,12 +969,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
74501 #ifdef CONFIG_IP_ROUTE_MULTIPATH
74502 fib_sync_up(dev);
74503 #endif
74504- atomic_inc(&net->ipv4.dev_addr_genid);
74505+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74506 rt_cache_flush(dev_net(dev), -1);
74507 break;
74508 case NETDEV_DOWN:
74509 fib_del_ifaddr(ifa, NULL);
74510- atomic_inc(&net->ipv4.dev_addr_genid);
74511+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74512 if (ifa->ifa_dev->ifa_list == NULL) {
74513 /* Last address was deleted from this interface.
74514 * Disable IP.
74515@@ -1010,7 +1010,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
74516 #ifdef CONFIG_IP_ROUTE_MULTIPATH
74517 fib_sync_up(dev);
74518 #endif
74519- atomic_inc(&net->ipv4.dev_addr_genid);
74520+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74521 rt_cache_flush(dev_net(dev), -1);
74522 break;
74523 case NETDEV_DOWN:
74524diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
74525index 8861f91..ab1e3c1 100644
74526--- a/net/ipv4/fib_semantics.c
74527+++ b/net/ipv4/fib_semantics.c
74528@@ -698,7 +698,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
74529 nh->nh_saddr = inet_select_addr(nh->nh_dev,
74530 nh->nh_gw,
74531 nh->nh_parent->fib_scope);
74532- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
74533+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
74534
74535 return nh->nh_saddr;
74536 }
74537diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
74538index 984ec65..97ac518 100644
74539--- a/net/ipv4/inet_hashtables.c
74540+++ b/net/ipv4/inet_hashtables.c
74541@@ -18,12 +18,15 @@
74542 #include <linux/sched.h>
74543 #include <linux/slab.h>
74544 #include <linux/wait.h>
74545+#include <linux/security.h>
74546
74547 #include <net/inet_connection_sock.h>
74548 #include <net/inet_hashtables.h>
74549 #include <net/secure_seq.h>
74550 #include <net/ip.h>
74551
74552+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
74553+
74554 /*
74555 * Allocate and initialize a new local port bind bucket.
74556 * The bindhash mutex for snum's hash chain must be held here.
74557@@ -530,6 +533,8 @@ ok:
74558 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
74559 spin_unlock(&head->lock);
74560
74561+ gr_update_task_in_ip_table(current, inet_sk(sk));
74562+
74563 if (tw) {
74564 inet_twsk_deschedule(tw, death_row);
74565 while (twrefcnt) {
74566diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
74567index d4d61b6..b81aec8 100644
74568--- a/net/ipv4/inetpeer.c
74569+++ b/net/ipv4/inetpeer.c
74570@@ -487,8 +487,8 @@ relookup:
74571 if (p) {
74572 p->daddr = *daddr;
74573 atomic_set(&p->refcnt, 1);
74574- atomic_set(&p->rid, 0);
74575- atomic_set(&p->ip_id_count,
74576+ atomic_set_unchecked(&p->rid, 0);
74577+ atomic_set_unchecked(&p->ip_id_count,
74578 (daddr->family == AF_INET) ?
74579 secure_ip_id(daddr->addr.a4) :
74580 secure_ipv6_id(daddr->addr.a6));
74581diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
74582index 3727e23..517f5df 100644
74583--- a/net/ipv4/ip_fragment.c
74584+++ b/net/ipv4/ip_fragment.c
74585@@ -318,7 +318,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
74586 return 0;
74587
74588 start = qp->rid;
74589- end = atomic_inc_return(&peer->rid);
74590+ end = atomic_inc_return_unchecked(&peer->rid);
74591 qp->rid = end;
74592
74593 rc = qp->q.fragments && (end - start) > max;
74594diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
74595index 2fd0fba..83fac99 100644
74596--- a/net/ipv4/ip_sockglue.c
74597+++ b/net/ipv4/ip_sockglue.c
74598@@ -1137,7 +1137,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
74599 len = min_t(unsigned int, len, opt->optlen);
74600 if (put_user(len, optlen))
74601 return -EFAULT;
74602- if (copy_to_user(optval, opt->__data, len))
74603+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
74604+ copy_to_user(optval, opt->__data, len))
74605 return -EFAULT;
74606 return 0;
74607 }
74608@@ -1268,7 +1269,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
74609 if (sk->sk_type != SOCK_STREAM)
74610 return -ENOPROTOOPT;
74611
74612- msg.msg_control = optval;
74613+ msg.msg_control = (void __force_kernel *)optval;
74614 msg.msg_controllen = len;
74615 msg.msg_flags = flags;
74616
74617diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
74618index 92ac7e7..13f93d9 100644
74619--- a/net/ipv4/ipconfig.c
74620+++ b/net/ipv4/ipconfig.c
74621@@ -321,7 +321,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
74622
74623 mm_segment_t oldfs = get_fs();
74624 set_fs(get_ds());
74625- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
74626+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
74627 set_fs(oldfs);
74628 return res;
74629 }
74630@@ -332,7 +332,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
74631
74632 mm_segment_t oldfs = get_fs();
74633 set_fs(get_ds());
74634- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
74635+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
74636 set_fs(oldfs);
74637 return res;
74638 }
74639@@ -343,7 +343,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
74640
74641 mm_segment_t oldfs = get_fs();
74642 set_fs(get_ds());
74643- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
74644+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
74645 set_fs(oldfs);
74646 return res;
74647 }
74648diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
74649index 50009c7..5996a9f 100644
74650--- a/net/ipv4/ping.c
74651+++ b/net/ipv4/ping.c
74652@@ -838,7 +838,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
74653 sk_rmem_alloc_get(sp),
74654 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74655 atomic_read(&sp->sk_refcnt), sp,
74656- atomic_read(&sp->sk_drops), len);
74657+ atomic_read_unchecked(&sp->sk_drops), len);
74658 }
74659
74660 static int ping_seq_show(struct seq_file *seq, void *v)
74661diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
74662index bbd604c..4d5469c 100644
74663--- a/net/ipv4/raw.c
74664+++ b/net/ipv4/raw.c
74665@@ -304,7 +304,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
74666 int raw_rcv(struct sock *sk, struct sk_buff *skb)
74667 {
74668 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
74669- atomic_inc(&sk->sk_drops);
74670+ atomic_inc_unchecked(&sk->sk_drops);
74671 kfree_skb(skb);
74672 return NET_RX_DROP;
74673 }
74674@@ -740,16 +740,20 @@ static int raw_init(struct sock *sk)
74675
74676 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
74677 {
74678+ struct icmp_filter filter;
74679+
74680 if (optlen > sizeof(struct icmp_filter))
74681 optlen = sizeof(struct icmp_filter);
74682- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
74683+ if (copy_from_user(&filter, optval, optlen))
74684 return -EFAULT;
74685+ raw_sk(sk)->filter = filter;
74686 return 0;
74687 }
74688
74689 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
74690 {
74691 int len, ret = -EFAULT;
74692+ struct icmp_filter filter;
74693
74694 if (get_user(len, optlen))
74695 goto out;
74696@@ -759,8 +763,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
74697 if (len > sizeof(struct icmp_filter))
74698 len = sizeof(struct icmp_filter);
74699 ret = -EFAULT;
74700- if (put_user(len, optlen) ||
74701- copy_to_user(optval, &raw_sk(sk)->filter, len))
74702+ filter = raw_sk(sk)->filter;
74703+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
74704 goto out;
74705 ret = 0;
74706 out: return ret;
74707@@ -988,7 +992,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
74708 sk_wmem_alloc_get(sp),
74709 sk_rmem_alloc_get(sp),
74710 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74711- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
74712+ atomic_read(&sp->sk_refcnt),
74713+#ifdef CONFIG_GRKERNSEC_HIDESYM
74714+ NULL,
74715+#else
74716+ sp,
74717+#endif
74718+ atomic_read_unchecked(&sp->sk_drops));
74719 }
74720
74721 static int raw_seq_show(struct seq_file *seq, void *v)
74722diff --git a/net/ipv4/route.c b/net/ipv4/route.c
74723index 167ea10..4b15883 100644
74724--- a/net/ipv4/route.c
74725+++ b/net/ipv4/route.c
74726@@ -312,7 +312,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
74727
74728 static inline int rt_genid(struct net *net)
74729 {
74730- return atomic_read(&net->ipv4.rt_genid);
74731+ return atomic_read_unchecked(&net->ipv4.rt_genid);
74732 }
74733
74734 #ifdef CONFIG_PROC_FS
74735@@ -936,7 +936,7 @@ static void rt_cache_invalidate(struct net *net)
74736 unsigned char shuffle;
74737
74738 get_random_bytes(&shuffle, sizeof(shuffle));
74739- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
74740+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
74741 inetpeer_invalidate_tree(AF_INET);
74742 }
74743
74744@@ -3009,7 +3009,7 @@ static int rt_fill_info(struct net *net,
74745 error = rt->dst.error;
74746 if (peer) {
74747 inet_peer_refcheck(rt->peer);
74748- id = atomic_read(&peer->ip_id_count) & 0xffff;
74749+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
74750 if (peer->tcp_ts_stamp) {
74751 ts = peer->tcp_ts;
74752 tsage = get_seconds() - peer->tcp_ts_stamp;
74753diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
74754index 0cb86ce..8e7fda8 100644
74755--- a/net/ipv4/tcp_ipv4.c
74756+++ b/net/ipv4/tcp_ipv4.c
74757@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
74758 EXPORT_SYMBOL(sysctl_tcp_low_latency);
74759
74760
74761+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74762+extern int grsec_enable_blackhole;
74763+#endif
74764+
74765 #ifdef CONFIG_TCP_MD5SIG
74766 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
74767 __be32 daddr, __be32 saddr, const struct tcphdr *th);
74768@@ -1641,6 +1645,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
74769 return 0;
74770
74771 reset:
74772+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74773+ if (!grsec_enable_blackhole)
74774+#endif
74775 tcp_v4_send_reset(rsk, skb);
74776 discard:
74777 kfree_skb(skb);
74778@@ -1703,12 +1710,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
74779 TCP_SKB_CB(skb)->sacked = 0;
74780
74781 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
74782- if (!sk)
74783+ if (!sk) {
74784+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74785+ ret = 1;
74786+#endif
74787 goto no_tcp_socket;
74788-
74789+ }
74790 process:
74791- if (sk->sk_state == TCP_TIME_WAIT)
74792+ if (sk->sk_state == TCP_TIME_WAIT) {
74793+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74794+ ret = 2;
74795+#endif
74796 goto do_time_wait;
74797+ }
74798
74799 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
74800 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
74801@@ -1758,6 +1772,10 @@ no_tcp_socket:
74802 bad_packet:
74803 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
74804 } else {
74805+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74806+ if (!grsec_enable_blackhole || (ret == 1 &&
74807+ (skb->dev->flags & IFF_LOOPBACK)))
74808+#endif
74809 tcp_v4_send_reset(NULL, skb);
74810 }
74811
74812@@ -2419,7 +2437,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
74813 0, /* non standard timer */
74814 0, /* open_requests have no inode */
74815 atomic_read(&sk->sk_refcnt),
74816+#ifdef CONFIG_GRKERNSEC_HIDESYM
74817+ NULL,
74818+#else
74819 req,
74820+#endif
74821 len);
74822 }
74823
74824@@ -2469,7 +2491,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
74825 sock_i_uid(sk),
74826 icsk->icsk_probes_out,
74827 sock_i_ino(sk),
74828- atomic_read(&sk->sk_refcnt), sk,
74829+ atomic_read(&sk->sk_refcnt),
74830+#ifdef CONFIG_GRKERNSEC_HIDESYM
74831+ NULL,
74832+#else
74833+ sk,
74834+#endif
74835 jiffies_to_clock_t(icsk->icsk_rto),
74836 jiffies_to_clock_t(icsk->icsk_ack.ato),
74837 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
74838@@ -2497,7 +2524,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
74839 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
74840 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
74841 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
74842- atomic_read(&tw->tw_refcnt), tw, len);
74843+ atomic_read(&tw->tw_refcnt),
74844+#ifdef CONFIG_GRKERNSEC_HIDESYM
74845+ NULL,
74846+#else
74847+ tw,
74848+#endif
74849+ len);
74850 }
74851
74852 #define TMPSZ 150
74853diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
74854index 3cabafb..640525b 100644
74855--- a/net/ipv4/tcp_minisocks.c
74856+++ b/net/ipv4/tcp_minisocks.c
74857@@ -27,6 +27,10 @@
74858 #include <net/inet_common.h>
74859 #include <net/xfrm.h>
74860
74861+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74862+extern int grsec_enable_blackhole;
74863+#endif
74864+
74865 int sysctl_tcp_syncookies __read_mostly = 1;
74866 EXPORT_SYMBOL(sysctl_tcp_syncookies);
74867
74868@@ -753,6 +757,10 @@ listen_overflow:
74869
74870 embryonic_reset:
74871 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
74872+
74873+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74874+ if (!grsec_enable_blackhole)
74875+#endif
74876 if (!(flg & TCP_FLAG_RST))
74877 req->rsk_ops->send_reset(sk, skb);
74878
74879diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
74880index a981cdc..48f4c3a 100644
74881--- a/net/ipv4/tcp_probe.c
74882+++ b/net/ipv4/tcp_probe.c
74883@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
74884 if (cnt + width >= len)
74885 break;
74886
74887- if (copy_to_user(buf + cnt, tbuf, width))
74888+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
74889 return -EFAULT;
74890 cnt += width;
74891 }
74892diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
74893index 34d4a02..3b57f86 100644
74894--- a/net/ipv4/tcp_timer.c
74895+++ b/net/ipv4/tcp_timer.c
74896@@ -22,6 +22,10 @@
74897 #include <linux/gfp.h>
74898 #include <net/tcp.h>
74899
74900+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74901+extern int grsec_lastack_retries;
74902+#endif
74903+
74904 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
74905 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
74906 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
74907@@ -196,6 +200,13 @@ static int tcp_write_timeout(struct sock *sk)
74908 }
74909 }
74910
74911+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74912+ if ((sk->sk_state == TCP_LAST_ACK) &&
74913+ (grsec_lastack_retries > 0) &&
74914+ (grsec_lastack_retries < retry_until))
74915+ retry_until = grsec_lastack_retries;
74916+#endif
74917+
74918 if (retransmits_timed_out(sk, retry_until,
74919 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
74920 /* Has it gone just too far? */
74921diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
74922index fe14105..0618260 100644
74923--- a/net/ipv4/udp.c
74924+++ b/net/ipv4/udp.c
74925@@ -87,6 +87,7 @@
74926 #include <linux/types.h>
74927 #include <linux/fcntl.h>
74928 #include <linux/module.h>
74929+#include <linux/security.h>
74930 #include <linux/socket.h>
74931 #include <linux/sockios.h>
74932 #include <linux/igmp.h>
74933@@ -109,6 +110,10 @@
74934 #include <trace/events/udp.h>
74935 #include "udp_impl.h"
74936
74937+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74938+extern int grsec_enable_blackhole;
74939+#endif
74940+
74941 struct udp_table udp_table __read_mostly;
74942 EXPORT_SYMBOL(udp_table);
74943
74944@@ -567,6 +572,9 @@ found:
74945 return s;
74946 }
74947
74948+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
74949+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
74950+
74951 /*
74952 * This routine is called by the ICMP module when it gets some
74953 * sort of error condition. If err < 0 then the socket should
74954@@ -858,9 +866,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
74955 dport = usin->sin_port;
74956 if (dport == 0)
74957 return -EINVAL;
74958+
74959+ err = gr_search_udp_sendmsg(sk, usin);
74960+ if (err)
74961+ return err;
74962 } else {
74963 if (sk->sk_state != TCP_ESTABLISHED)
74964 return -EDESTADDRREQ;
74965+
74966+ err = gr_search_udp_sendmsg(sk, NULL);
74967+ if (err)
74968+ return err;
74969+
74970 daddr = inet->inet_daddr;
74971 dport = inet->inet_dport;
74972 /* Open fast path for connected socket.
74973@@ -1102,7 +1119,7 @@ static unsigned int first_packet_length(struct sock *sk)
74974 udp_lib_checksum_complete(skb)) {
74975 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
74976 IS_UDPLITE(sk));
74977- atomic_inc(&sk->sk_drops);
74978+ atomic_inc_unchecked(&sk->sk_drops);
74979 __skb_unlink(skb, rcvq);
74980 __skb_queue_tail(&list_kill, skb);
74981 }
74982@@ -1188,6 +1205,10 @@ try_again:
74983 if (!skb)
74984 goto out;
74985
74986+ err = gr_search_udp_recvmsg(sk, skb);
74987+ if (err)
74988+ goto out_free;
74989+
74990 ulen = skb->len - sizeof(struct udphdr);
74991 copied = len;
74992 if (copied > ulen)
74993@@ -1489,7 +1510,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74994
74995 drop:
74996 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
74997- atomic_inc(&sk->sk_drops);
74998+ atomic_inc_unchecked(&sk->sk_drops);
74999 kfree_skb(skb);
75000 return -1;
75001 }
75002@@ -1508,7 +1529,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
75003 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
75004
75005 if (!skb1) {
75006- atomic_inc(&sk->sk_drops);
75007+ atomic_inc_unchecked(&sk->sk_drops);
75008 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
75009 IS_UDPLITE(sk));
75010 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75011@@ -1677,6 +1698,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75012 goto csum_error;
75013
75014 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
75015+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75016+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75017+#endif
75018 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
75019
75020 /*
75021@@ -2094,8 +2118,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
75022 sk_wmem_alloc_get(sp),
75023 sk_rmem_alloc_get(sp),
75024 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75025- atomic_read(&sp->sk_refcnt), sp,
75026- atomic_read(&sp->sk_drops), len);
75027+ atomic_read(&sp->sk_refcnt),
75028+#ifdef CONFIG_GRKERNSEC_HIDESYM
75029+ NULL,
75030+#else
75031+ sp,
75032+#endif
75033+ atomic_read_unchecked(&sp->sk_drops), len);
75034 }
75035
75036 int udp4_seq_show(struct seq_file *seq, void *v)
75037diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
75038index 7d5cb97..c56564f 100644
75039--- a/net/ipv6/addrconf.c
75040+++ b/net/ipv6/addrconf.c
75041@@ -2142,7 +2142,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
75042 p.iph.ihl = 5;
75043 p.iph.protocol = IPPROTO_IPV6;
75044 p.iph.ttl = 64;
75045- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
75046+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
75047
75048 if (ops->ndo_do_ioctl) {
75049 mm_segment_t oldfs = get_fs();
75050diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
75051index 02dd203..e03fcc9 100644
75052--- a/net/ipv6/inet6_connection_sock.c
75053+++ b/net/ipv6/inet6_connection_sock.c
75054@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
75055 #ifdef CONFIG_XFRM
75056 {
75057 struct rt6_info *rt = (struct rt6_info *)dst;
75058- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
75059+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
75060 }
75061 #endif
75062 }
75063@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
75064 #ifdef CONFIG_XFRM
75065 if (dst) {
75066 struct rt6_info *rt = (struct rt6_info *)dst;
75067- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
75068+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
75069 __sk_dst_reset(sk);
75070 dst = NULL;
75071 }
75072diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
75073index 63dd1f8..e7f53ca 100644
75074--- a/net/ipv6/ipv6_sockglue.c
75075+++ b/net/ipv6/ipv6_sockglue.c
75076@@ -990,7 +990,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
75077 if (sk->sk_type != SOCK_STREAM)
75078 return -ENOPROTOOPT;
75079
75080- msg.msg_control = optval;
75081+ msg.msg_control = (void __force_kernel *)optval;
75082 msg.msg_controllen = len;
75083 msg.msg_flags = flags;
75084
75085diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
75086index 5bddea7..82d9d67 100644
75087--- a/net/ipv6/raw.c
75088+++ b/net/ipv6/raw.c
75089@@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
75090 {
75091 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
75092 skb_checksum_complete(skb)) {
75093- atomic_inc(&sk->sk_drops);
75094+ atomic_inc_unchecked(&sk->sk_drops);
75095 kfree_skb(skb);
75096 return NET_RX_DROP;
75097 }
75098@@ -405,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75099 struct raw6_sock *rp = raw6_sk(sk);
75100
75101 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
75102- atomic_inc(&sk->sk_drops);
75103+ atomic_inc_unchecked(&sk->sk_drops);
75104 kfree_skb(skb);
75105 return NET_RX_DROP;
75106 }
75107@@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75108
75109 if (inet->hdrincl) {
75110 if (skb_checksum_complete(skb)) {
75111- atomic_inc(&sk->sk_drops);
75112+ atomic_inc_unchecked(&sk->sk_drops);
75113 kfree_skb(skb);
75114 return NET_RX_DROP;
75115 }
75116@@ -602,7 +602,7 @@ out:
75117 return err;
75118 }
75119
75120-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
75121+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
75122 struct flowi6 *fl6, struct dst_entry **dstp,
75123 unsigned int flags)
75124 {
75125@@ -914,12 +914,15 @@ do_confirm:
75126 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
75127 char __user *optval, int optlen)
75128 {
75129+ struct icmp6_filter filter;
75130+
75131 switch (optname) {
75132 case ICMPV6_FILTER:
75133 if (optlen > sizeof(struct icmp6_filter))
75134 optlen = sizeof(struct icmp6_filter);
75135- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
75136+ if (copy_from_user(&filter, optval, optlen))
75137 return -EFAULT;
75138+ raw6_sk(sk)->filter = filter;
75139 return 0;
75140 default:
75141 return -ENOPROTOOPT;
75142@@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
75143 char __user *optval, int __user *optlen)
75144 {
75145 int len;
75146+ struct icmp6_filter filter;
75147
75148 switch (optname) {
75149 case ICMPV6_FILTER:
75150@@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
75151 len = sizeof(struct icmp6_filter);
75152 if (put_user(len, optlen))
75153 return -EFAULT;
75154- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
75155+ filter = raw6_sk(sk)->filter;
75156+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
75157 return -EFAULT;
75158 return 0;
75159 default:
75160@@ -1250,7 +1255,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
75161 0, 0L, 0,
75162 sock_i_uid(sp), 0,
75163 sock_i_ino(sp),
75164- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
75165+ atomic_read(&sp->sk_refcnt),
75166+#ifdef CONFIG_GRKERNSEC_HIDESYM
75167+ NULL,
75168+#else
75169+ sp,
75170+#endif
75171+ atomic_read_unchecked(&sp->sk_drops));
75172 }
75173
75174 static int raw6_seq_show(struct seq_file *seq, void *v)
75175diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
75176index 98256cf..7f16dbd 100644
75177--- a/net/ipv6/tcp_ipv6.c
75178+++ b/net/ipv6/tcp_ipv6.c
75179@@ -94,6 +94,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
75180 }
75181 #endif
75182
75183+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75184+extern int grsec_enable_blackhole;
75185+#endif
75186+
75187 static void tcp_v6_hash(struct sock *sk)
75188 {
75189 if (sk->sk_state != TCP_CLOSE) {
75190@@ -1542,6 +1546,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
75191 return 0;
75192
75193 reset:
75194+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75195+ if (!grsec_enable_blackhole)
75196+#endif
75197 tcp_v6_send_reset(sk, skb);
75198 discard:
75199 if (opt_skb)
75200@@ -1623,12 +1630,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
75201 TCP_SKB_CB(skb)->sacked = 0;
75202
75203 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75204- if (!sk)
75205+ if (!sk) {
75206+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75207+ ret = 1;
75208+#endif
75209 goto no_tcp_socket;
75210+ }
75211
75212 process:
75213- if (sk->sk_state == TCP_TIME_WAIT)
75214+ if (sk->sk_state == TCP_TIME_WAIT) {
75215+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75216+ ret = 2;
75217+#endif
75218 goto do_time_wait;
75219+ }
75220
75221 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
75222 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
75223@@ -1676,6 +1691,10 @@ no_tcp_socket:
75224 bad_packet:
75225 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75226 } else {
75227+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75228+ if (!grsec_enable_blackhole || (ret == 1 &&
75229+ (skb->dev->flags & IFF_LOOPBACK)))
75230+#endif
75231 tcp_v6_send_reset(NULL, skb);
75232 }
75233
75234@@ -1930,7 +1949,13 @@ static void get_openreq6(struct seq_file *seq,
75235 uid,
75236 0, /* non standard timer */
75237 0, /* open_requests have no inode */
75238- 0, req);
75239+ 0,
75240+#ifdef CONFIG_GRKERNSEC_HIDESYM
75241+ NULL
75242+#else
75243+ req
75244+#endif
75245+ );
75246 }
75247
75248 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
75249@@ -1980,7 +2005,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
75250 sock_i_uid(sp),
75251 icsk->icsk_probes_out,
75252 sock_i_ino(sp),
75253- atomic_read(&sp->sk_refcnt), sp,
75254+ atomic_read(&sp->sk_refcnt),
75255+#ifdef CONFIG_GRKERNSEC_HIDESYM
75256+ NULL,
75257+#else
75258+ sp,
75259+#endif
75260 jiffies_to_clock_t(icsk->icsk_rto),
75261 jiffies_to_clock_t(icsk->icsk_ack.ato),
75262 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
75263@@ -2015,7 +2045,13 @@ static void get_timewait6_sock(struct seq_file *seq,
75264 dest->s6_addr32[2], dest->s6_addr32[3], destp,
75265 tw->tw_substate, 0, 0,
75266 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75267- atomic_read(&tw->tw_refcnt), tw);
75268+ atomic_read(&tw->tw_refcnt),
75269+#ifdef CONFIG_GRKERNSEC_HIDESYM
75270+ NULL
75271+#else
75272+ tw
75273+#endif
75274+ );
75275 }
75276
75277 static int tcp6_seq_show(struct seq_file *seq, void *v)
75278diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
75279index 37b0699..d323408 100644
75280--- a/net/ipv6/udp.c
75281+++ b/net/ipv6/udp.c
75282@@ -50,6 +50,10 @@
75283 #include <linux/seq_file.h>
75284 #include "udp_impl.h"
75285
75286+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75287+extern int grsec_enable_blackhole;
75288+#endif
75289+
75290 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
75291 {
75292 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
75293@@ -551,7 +555,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
75294
75295 return 0;
75296 drop:
75297- atomic_inc(&sk->sk_drops);
75298+ atomic_inc_unchecked(&sk->sk_drops);
75299 drop_no_sk_drops_inc:
75300 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
75301 kfree_skb(skb);
75302@@ -627,7 +631,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
75303 continue;
75304 }
75305 drop:
75306- atomic_inc(&sk->sk_drops);
75307+ atomic_inc_unchecked(&sk->sk_drops);
75308 UDP6_INC_STATS_BH(sock_net(sk),
75309 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
75310 UDP6_INC_STATS_BH(sock_net(sk),
75311@@ -782,6 +786,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75312 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
75313 proto == IPPROTO_UDPLITE);
75314
75315+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75316+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75317+#endif
75318 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
75319
75320 kfree_skb(skb);
75321@@ -798,7 +805,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75322 if (!sock_owned_by_user(sk))
75323 udpv6_queue_rcv_skb(sk, skb);
75324 else if (sk_add_backlog(sk, skb)) {
75325- atomic_inc(&sk->sk_drops);
75326+ atomic_inc_unchecked(&sk->sk_drops);
75327 bh_unlock_sock(sk);
75328 sock_put(sk);
75329 goto discard;
75330@@ -1411,8 +1418,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
75331 0, 0L, 0,
75332 sock_i_uid(sp), 0,
75333 sock_i_ino(sp),
75334- atomic_read(&sp->sk_refcnt), sp,
75335- atomic_read(&sp->sk_drops));
75336+ atomic_read(&sp->sk_refcnt),
75337+#ifdef CONFIG_GRKERNSEC_HIDESYM
75338+ NULL,
75339+#else
75340+ sp,
75341+#endif
75342+ atomic_read_unchecked(&sp->sk_drops));
75343 }
75344
75345 int udp6_seq_show(struct seq_file *seq, void *v)
75346diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
75347index 6b9d5a0..4dffaf1 100644
75348--- a/net/irda/ircomm/ircomm_tty.c
75349+++ b/net/irda/ircomm/ircomm_tty.c
75350@@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75351 add_wait_queue(&self->open_wait, &wait);
75352
75353 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
75354- __FILE__,__LINE__, tty->driver->name, self->open_count );
75355+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
75356
75357 /* As far as I can see, we protect open_count - Jean II */
75358 spin_lock_irqsave(&self->spinlock, flags);
75359 if (!tty_hung_up_p(filp)) {
75360 extra_count = 1;
75361- self->open_count--;
75362+ local_dec(&self->open_count);
75363 }
75364 spin_unlock_irqrestore(&self->spinlock, flags);
75365- self->blocked_open++;
75366+ local_inc(&self->blocked_open);
75367
75368 while (1) {
75369 if (tty->termios->c_cflag & CBAUD) {
75370@@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75371 }
75372
75373 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
75374- __FILE__,__LINE__, tty->driver->name, self->open_count );
75375+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
75376
75377 schedule();
75378 }
75379@@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75380 if (extra_count) {
75381 /* ++ is not atomic, so this should be protected - Jean II */
75382 spin_lock_irqsave(&self->spinlock, flags);
75383- self->open_count++;
75384+ local_inc(&self->open_count);
75385 spin_unlock_irqrestore(&self->spinlock, flags);
75386 }
75387- self->blocked_open--;
75388+ local_dec(&self->blocked_open);
75389
75390 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
75391- __FILE__,__LINE__, tty->driver->name, self->open_count);
75392+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
75393
75394 if (!retval)
75395 self->flags |= ASYNC_NORMAL_ACTIVE;
75396@@ -412,14 +412,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
75397 }
75398 /* ++ is not atomic, so this should be protected - Jean II */
75399 spin_lock_irqsave(&self->spinlock, flags);
75400- self->open_count++;
75401+ local_inc(&self->open_count);
75402
75403 tty->driver_data = self;
75404 self->tty = tty;
75405 spin_unlock_irqrestore(&self->spinlock, flags);
75406
75407 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
75408- self->line, self->open_count);
75409+ self->line, local_read(&self->open_count));
75410
75411 /* Not really used by us, but lets do it anyway */
75412 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
75413@@ -505,7 +505,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75414 return;
75415 }
75416
75417- if ((tty->count == 1) && (self->open_count != 1)) {
75418+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
75419 /*
75420 * Uh, oh. tty->count is 1, which means that the tty
75421 * structure will be freed. state->count should always
75422@@ -515,16 +515,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75423 */
75424 IRDA_DEBUG(0, "%s(), bad serial port count; "
75425 "tty->count is 1, state->count is %d\n", __func__ ,
75426- self->open_count);
75427- self->open_count = 1;
75428+ local_read(&self->open_count));
75429+ local_set(&self->open_count, 1);
75430 }
75431
75432- if (--self->open_count < 0) {
75433+ if (local_dec_return(&self->open_count) < 0) {
75434 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
75435- __func__, self->line, self->open_count);
75436- self->open_count = 0;
75437+ __func__, self->line, local_read(&self->open_count));
75438+ local_set(&self->open_count, 0);
75439 }
75440- if (self->open_count) {
75441+ if (local_read(&self->open_count)) {
75442 spin_unlock_irqrestore(&self->spinlock, flags);
75443
75444 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
75445@@ -556,7 +556,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75446 tty->closing = 0;
75447 self->tty = NULL;
75448
75449- if (self->blocked_open) {
75450+ if (local_read(&self->blocked_open)) {
75451 if (self->close_delay)
75452 schedule_timeout_interruptible(self->close_delay);
75453 wake_up_interruptible(&self->open_wait);
75454@@ -1008,7 +1008,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
75455 spin_lock_irqsave(&self->spinlock, flags);
75456 self->flags &= ~ASYNC_NORMAL_ACTIVE;
75457 self->tty = NULL;
75458- self->open_count = 0;
75459+ local_set(&self->open_count, 0);
75460 spin_unlock_irqrestore(&self->spinlock, flags);
75461
75462 wake_up_interruptible(&self->open_wait);
75463@@ -1355,7 +1355,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
75464 seq_putc(m, '\n');
75465
75466 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
75467- seq_printf(m, "Open count: %d\n", self->open_count);
75468+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
75469 seq_printf(m, "Max data size: %d\n", self->max_data_size);
75470 seq_printf(m, "Max header size: %d\n", self->max_header_size);
75471
75472diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
75473index 07d7d55..541de95 100644
75474--- a/net/iucv/af_iucv.c
75475+++ b/net/iucv/af_iucv.c
75476@@ -783,10 +783,10 @@ static int iucv_sock_autobind(struct sock *sk)
75477
75478 write_lock_bh(&iucv_sk_list.lock);
75479
75480- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
75481+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
75482 while (__iucv_get_sock_by_name(name)) {
75483 sprintf(name, "%08x",
75484- atomic_inc_return(&iucv_sk_list.autobind_name));
75485+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
75486 }
75487
75488 write_unlock_bh(&iucv_sk_list.lock);
75489diff --git a/net/key/af_key.c b/net/key/af_key.c
75490index 7e5d927..cdbb54e 100644
75491--- a/net/key/af_key.c
75492+++ b/net/key/af_key.c
75493@@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
75494 static u32 get_acqseq(void)
75495 {
75496 u32 res;
75497- static atomic_t acqseq;
75498+ static atomic_unchecked_t acqseq;
75499
75500 do {
75501- res = atomic_inc_return(&acqseq);
75502+ res = atomic_inc_return_unchecked(&acqseq);
75503 } while (!res);
75504 return res;
75505 }
75506diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
75507index db8fae5..ff070cd 100644
75508--- a/net/mac80211/ieee80211_i.h
75509+++ b/net/mac80211/ieee80211_i.h
75510@@ -28,6 +28,7 @@
75511 #include <net/ieee80211_radiotap.h>
75512 #include <net/cfg80211.h>
75513 #include <net/mac80211.h>
75514+#include <asm/local.h>
75515 #include "key.h"
75516 #include "sta_info.h"
75517
75518@@ -842,7 +843,7 @@ struct ieee80211_local {
75519 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
75520 spinlock_t queue_stop_reason_lock;
75521
75522- int open_count;
75523+ local_t open_count;
75524 int monitors, cooked_mntrs;
75525 /* number of interfaces with corresponding FIF_ flags */
75526 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
75527diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
75528index 48f937e..4ccd7b8 100644
75529--- a/net/mac80211/iface.c
75530+++ b/net/mac80211/iface.c
75531@@ -222,7 +222,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75532 break;
75533 }
75534
75535- if (local->open_count == 0) {
75536+ if (local_read(&local->open_count) == 0) {
75537 res = drv_start(local);
75538 if (res)
75539 goto err_del_bss;
75540@@ -246,7 +246,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75541 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
75542
75543 if (!is_valid_ether_addr(dev->dev_addr)) {
75544- if (!local->open_count)
75545+ if (!local_read(&local->open_count))
75546 drv_stop(local);
75547 return -EADDRNOTAVAIL;
75548 }
75549@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75550 mutex_unlock(&local->mtx);
75551
75552 if (coming_up)
75553- local->open_count++;
75554+ local_inc(&local->open_count);
75555
75556 if (hw_reconf_flags)
75557 ieee80211_hw_config(local, hw_reconf_flags);
75558@@ -360,7 +360,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75559 err_del_interface:
75560 drv_remove_interface(local, sdata);
75561 err_stop:
75562- if (!local->open_count)
75563+ if (!local_read(&local->open_count))
75564 drv_stop(local);
75565 err_del_bss:
75566 sdata->bss = NULL;
75567@@ -491,7 +491,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
75568 }
75569
75570 if (going_down)
75571- local->open_count--;
75572+ local_dec(&local->open_count);
75573
75574 switch (sdata->vif.type) {
75575 case NL80211_IFTYPE_AP_VLAN:
75576@@ -562,7 +562,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
75577
75578 ieee80211_recalc_ps(local, -1);
75579
75580- if (local->open_count == 0) {
75581+ if (local_read(&local->open_count) == 0) {
75582 if (local->ops->napi_poll)
75583 napi_disable(&local->napi);
75584 ieee80211_clear_tx_pending(local);
75585diff --git a/net/mac80211/main.c b/net/mac80211/main.c
75586index 1633648..d45ebfa 100644
75587--- a/net/mac80211/main.c
75588+++ b/net/mac80211/main.c
75589@@ -164,7 +164,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
75590 local->hw.conf.power_level = power;
75591 }
75592
75593- if (changed && local->open_count) {
75594+ if (changed && local_read(&local->open_count)) {
75595 ret = drv_config(local, changed);
75596 /*
75597 * Goal:
75598diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
75599index ef8eba1..5c63952 100644
75600--- a/net/mac80211/pm.c
75601+++ b/net/mac80211/pm.c
75602@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75603 struct ieee80211_sub_if_data *sdata;
75604 struct sta_info *sta;
75605
75606- if (!local->open_count)
75607+ if (!local_read(&local->open_count))
75608 goto suspend;
75609
75610 ieee80211_scan_cancel(local);
75611@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75612 cancel_work_sync(&local->dynamic_ps_enable_work);
75613 del_timer_sync(&local->dynamic_ps_timer);
75614
75615- local->wowlan = wowlan && local->open_count;
75616+ local->wowlan = wowlan && local_read(&local->open_count);
75617 if (local->wowlan) {
75618 int err = drv_suspend(local, wowlan);
75619 if (err < 0) {
75620@@ -128,7 +128,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75621 }
75622
75623 /* stop hardware - this must stop RX */
75624- if (local->open_count)
75625+ if (local_read(&local->open_count))
75626 ieee80211_stop_device(local);
75627
75628 suspend:
75629diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
75630index 3313c11..bec9f17 100644
75631--- a/net/mac80211/rate.c
75632+++ b/net/mac80211/rate.c
75633@@ -494,7 +494,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
75634
75635 ASSERT_RTNL();
75636
75637- if (local->open_count)
75638+ if (local_read(&local->open_count))
75639 return -EBUSY;
75640
75641 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
75642diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
75643index c97a065..ff61928 100644
75644--- a/net/mac80211/rc80211_pid_debugfs.c
75645+++ b/net/mac80211/rc80211_pid_debugfs.c
75646@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
75647
75648 spin_unlock_irqrestore(&events->lock, status);
75649
75650- if (copy_to_user(buf, pb, p))
75651+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
75652 return -EFAULT;
75653
75654 return p;
75655diff --git a/net/mac80211/util.c b/net/mac80211/util.c
75656index eb9d7c0..d34b832 100644
75657--- a/net/mac80211/util.c
75658+++ b/net/mac80211/util.c
75659@@ -1179,7 +1179,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
75660 }
75661 #endif
75662 /* everything else happens only if HW was up & running */
75663- if (!local->open_count)
75664+ if (!local_read(&local->open_count))
75665 goto wake_up;
75666
75667 /*
75668diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
75669index 0c6f67e..d02cdfc 100644
75670--- a/net/netfilter/Kconfig
75671+++ b/net/netfilter/Kconfig
75672@@ -836,6 +836,16 @@ config NETFILTER_XT_MATCH_ESP
75673
75674 To compile it as a module, choose M here. If unsure, say N.
75675
75676+config NETFILTER_XT_MATCH_GRADM
75677+ tristate '"gradm" match support'
75678+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
75679+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
75680+ ---help---
75681+ The gradm match allows to match on grsecurity RBAC being enabled.
75682+ It is useful when iptables rules are applied early on bootup to
75683+ prevent connections to the machine (except from a trusted host)
75684+ while the RBAC system is disabled.
75685+
75686 config NETFILTER_XT_MATCH_HASHLIMIT
75687 tristate '"hashlimit" match support'
75688 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
75689diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
75690index ca36765..0882e7c 100644
75691--- a/net/netfilter/Makefile
75692+++ b/net/netfilter/Makefile
75693@@ -86,6 +86,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
75694 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
75695 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
75696 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
75697+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
75698 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
75699 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
75700 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
75701diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
75702index 29fa5ba..8debc79 100644
75703--- a/net/netfilter/ipvs/ip_vs_conn.c
75704+++ b/net/netfilter/ipvs/ip_vs_conn.c
75705@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
75706 /* Increase the refcnt counter of the dest */
75707 atomic_inc(&dest->refcnt);
75708
75709- conn_flags = atomic_read(&dest->conn_flags);
75710+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
75711 if (cp->protocol != IPPROTO_UDP)
75712 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
75713 /* Bind with the destination and its corresponding transmitter */
75714@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
75715 atomic_set(&cp->refcnt, 1);
75716
75717 atomic_set(&cp->n_control, 0);
75718- atomic_set(&cp->in_pkts, 0);
75719+ atomic_set_unchecked(&cp->in_pkts, 0);
75720
75721 atomic_inc(&ipvs->conn_count);
75722 if (flags & IP_VS_CONN_F_NO_CPORT)
75723@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
75724
75725 /* Don't drop the entry if its number of incoming packets is not
75726 located in [0, 8] */
75727- i = atomic_read(&cp->in_pkts);
75728+ i = atomic_read_unchecked(&cp->in_pkts);
75729 if (i > 8 || i < 0) return 0;
75730
75731 if (!todrop_rate[i]) return 0;
75732diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
75733index 00bdb1d..6725a48 100644
75734--- a/net/netfilter/ipvs/ip_vs_core.c
75735+++ b/net/netfilter/ipvs/ip_vs_core.c
75736@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
75737 ret = cp->packet_xmit(skb, cp, pd->pp);
75738 /* do not touch skb anymore */
75739
75740- atomic_inc(&cp->in_pkts);
75741+ atomic_inc_unchecked(&cp->in_pkts);
75742 ip_vs_conn_put(cp);
75743 return ret;
75744 }
75745@@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
75746 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
75747 pkts = sysctl_sync_threshold(ipvs);
75748 else
75749- pkts = atomic_add_return(1, &cp->in_pkts);
75750+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
75751
75752 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
75753 cp->protocol == IPPROTO_SCTP) {
75754diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
75755index f558998..9cdff60 100644
75756--- a/net/netfilter/ipvs/ip_vs_ctl.c
75757+++ b/net/netfilter/ipvs/ip_vs_ctl.c
75758@@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
75759 ip_vs_rs_hash(ipvs, dest);
75760 write_unlock_bh(&ipvs->rs_lock);
75761 }
75762- atomic_set(&dest->conn_flags, conn_flags);
75763+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
75764
75765 /* bind the service */
75766 if (!dest->svc) {
75767@@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
75768 " %-7s %-6d %-10d %-10d\n",
75769 &dest->addr.in6,
75770 ntohs(dest->port),
75771- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
75772+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
75773 atomic_read(&dest->weight),
75774 atomic_read(&dest->activeconns),
75775 atomic_read(&dest->inactconns));
75776@@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
75777 "%-7s %-6d %-10d %-10d\n",
75778 ntohl(dest->addr.ip),
75779 ntohs(dest->port),
75780- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
75781+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
75782 atomic_read(&dest->weight),
75783 atomic_read(&dest->activeconns),
75784 atomic_read(&dest->inactconns));
75785@@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
75786
75787 entry.addr = dest->addr.ip;
75788 entry.port = dest->port;
75789- entry.conn_flags = atomic_read(&dest->conn_flags);
75790+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
75791 entry.weight = atomic_read(&dest->weight);
75792 entry.u_threshold = dest->u_threshold;
75793 entry.l_threshold = dest->l_threshold;
75794@@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
75795 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
75796
75797 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
75798- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
75799+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
75800 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
75801 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
75802 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
75803diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
75804index 8a0d6d6..90ec197 100644
75805--- a/net/netfilter/ipvs/ip_vs_sync.c
75806+++ b/net/netfilter/ipvs/ip_vs_sync.c
75807@@ -649,7 +649,7 @@ control:
75808 * i.e only increment in_pkts for Templates.
75809 */
75810 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
75811- int pkts = atomic_add_return(1, &cp->in_pkts);
75812+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
75813
75814 if (pkts % sysctl_sync_period(ipvs) != 1)
75815 return;
75816@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
75817
75818 if (opt)
75819 memcpy(&cp->in_seq, opt, sizeof(*opt));
75820- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
75821+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
75822 cp->state = state;
75823 cp->old_state = cp->state;
75824 /*
75825diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
75826index 7fd66de..e6fb361 100644
75827--- a/net/netfilter/ipvs/ip_vs_xmit.c
75828+++ b/net/netfilter/ipvs/ip_vs_xmit.c
75829@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
75830 else
75831 rc = NF_ACCEPT;
75832 /* do not touch skb anymore */
75833- atomic_inc(&cp->in_pkts);
75834+ atomic_inc_unchecked(&cp->in_pkts);
75835 goto out;
75836 }
75837
75838@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
75839 else
75840 rc = NF_ACCEPT;
75841 /* do not touch skb anymore */
75842- atomic_inc(&cp->in_pkts);
75843+ atomic_inc_unchecked(&cp->in_pkts);
75844 goto out;
75845 }
75846
75847diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
75848index 66b2c54..c7884e3 100644
75849--- a/net/netfilter/nfnetlink_log.c
75850+++ b/net/netfilter/nfnetlink_log.c
75851@@ -70,7 +70,7 @@ struct nfulnl_instance {
75852 };
75853
75854 static DEFINE_SPINLOCK(instances_lock);
75855-static atomic_t global_seq;
75856+static atomic_unchecked_t global_seq;
75857
75858 #define INSTANCE_BUCKETS 16
75859 static struct hlist_head instance_table[INSTANCE_BUCKETS];
75860@@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
75861 /* global sequence number */
75862 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
75863 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
75864- htonl(atomic_inc_return(&global_seq)));
75865+ htonl(atomic_inc_return_unchecked(&global_seq)));
75866
75867 if (data_len) {
75868 struct nlattr *nla;
75869diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
75870new file mode 100644
75871index 0000000..6905327
75872--- /dev/null
75873+++ b/net/netfilter/xt_gradm.c
75874@@ -0,0 +1,51 @@
75875+/*
75876+ * gradm match for netfilter
75877