]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.9.1-3.4.3-201206182054.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9.1-3.4.3-201206182054.patch
CommitLineData
811b06e3
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index b4a898f..830febf 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -48,9 +51,11 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37@@ -69,6 +74,7 @@ Image
38 Module.markers
39 Module.symvers
40 PENDING
41+PERF*
42 SCCS
43 System.map*
44 TAGS
45@@ -80,6 +86,7 @@ aic7*seq.h*
46 aicasm
47 aicdb.h*
48 altivec*.c
49+ashldi3.S
50 asm-offsets.h
51 asm_offsets.h
52 autoconf.h*
53@@ -92,19 +99,24 @@ bounds.h
54 bsetup
55 btfixupprep
56 build
57+builtin-policy.h
58 bvmlinux
59 bzImage*
60 capability_names.h
61 capflags.c
62 classlist.h*
63+clut_vga16.c
64+common-cmds.h
65 comp*.log
66 compile.h*
67 conf
68 config
69 config-*
70 config_data.h*
71+config.c
72 config.mak
73 config.mak.autogen
74+config.tmp
75 conmakehash
76 consolemap_deftbl.c*
77 cpustr.h
78@@ -115,9 +127,11 @@ devlist.h*
79 dnotify_test
80 docproc
81 dslm
82+dtc-lexer.lex.c
83 elf2ecoff
84 elfconfig.h*
85 evergreen_reg_safe.h
86+exception_policy.conf
87 fixdep
88 flask.h
89 fore200e_mkfirm
90@@ -125,12 +139,15 @@ fore200e_pca_fw.c*
91 gconf
92 gconf.glade.h
93 gen-devlist
94+gen-kdb_cmds.c
95 gen_crc32table
96 gen_init_cpio
97 generated
98 genheaders
99 genksyms
100 *_gray256.c
101+hash
102+hid-example
103 hpet_example
104 hugepage-mmap
105 hugepage-shm
106@@ -145,7 +162,7 @@ int32.c
107 int4.c
108 int8.c
109 kallsyms
110-kconfig
111+kern_constants.h
112 keywords.c
113 ksym.c*
114 ksym.h*
115@@ -153,7 +170,7 @@ kxgettext
116 lkc_defs.h
117 lex.c
118 lex.*.c
119-linux
120+lib1funcs.S
121 logo_*.c
122 logo_*_clut224.c
123 logo_*_mono.c
124@@ -164,14 +181,15 @@ machtypes.h
125 map
126 map_hugetlb
127 maui_boot.h
128-media
129 mconf
130+mdp
131 miboot*
132 mk_elfconfig
133 mkboot
134 mkbugboot
135 mkcpustr
136 mkdep
137+mkpiggy
138 mkprep
139 mkregtable
140 mktables
141@@ -188,6 +206,7 @@ oui.c*
142 page-types
143 parse.c
144 parse.h
145+parse-events*
146 patches*
147 pca200e.bin
148 pca200e_ecd.bin2
149@@ -197,6 +216,7 @@ perf-archive
150 piggyback
151 piggy.gzip
152 piggy.S
153+pmu-*
154 pnmtologo
155 ppc_defs.h*
156 pss_boot.h
157@@ -207,6 +227,7 @@ r300_reg_safe.h
158 r420_reg_safe.h
159 r600_reg_safe.h
160 recordmcount
161+regdb.c
162 relocs
163 rlim_names.h
164 rn50_reg_safe.h
165@@ -217,6 +238,7 @@ setup
166 setup.bin
167 setup.elf
168 sImage
169+slabinfo
170 sm_tbl*
171 split-include
172 syscalltab.h
173@@ -227,6 +249,7 @@ tftpboot.img
174 timeconst.h
175 times.h*
176 trix_boot.h
177+user_constants.h
178 utsrelease.h*
179 vdso-syms.lds
180 vdso.lds
181@@ -238,13 +261,17 @@ vdso32.lds
182 vdso32.so.dbg
183 vdso64.lds
184 vdso64.so.dbg
185+vdsox32.lds
186+vdsox32-syms.lds
187 version.h*
188 vmImage
189 vmlinux
190 vmlinux-*
191 vmlinux.aout
192 vmlinux.bin.all
193+vmlinux.bin.bz2
194 vmlinux.lds
195+vmlinux.relocs
196 vmlinuz
197 voffset.h
198 vsyscall.lds
199@@ -252,9 +279,11 @@ vsyscall_32.lds
200 wanxlfw.inc
201 uImage
202 unifdef
203+utsrelease.h
204 wakeup.bin
205 wakeup.elf
206 wakeup.lds
207 zImage*
208 zconf.hash.c
209+zconf.lex.c
210 zoffset.h
211diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
212index c1601e5..08557ce 100644
213--- a/Documentation/kernel-parameters.txt
214+++ b/Documentation/kernel-parameters.txt
215@@ -2021,6 +2021,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
216 the specified number of seconds. This is to be used if
217 your oopses keep scrolling off the screen.
218
219+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
220+ virtualization environments that don't cope well with the
221+ expand down segment used by UDEREF on X86-32 or the frequent
222+ page table updates on X86-64.
223+
224+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
225+
226 pcbit= [HW,ISDN]
227
228 pcd. [PARIDE]
229diff --git a/Makefile b/Makefile
230index a0804c6..f487027 100644
231--- a/Makefile
232+++ b/Makefile
233@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
234
235 HOSTCC = gcc
236 HOSTCXX = g++
237-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
238-HOSTCXXFLAGS = -O2
239+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
240+HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
241+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
242
243 # Decide whether to build built-in, modular, or both.
244 # Normally, just do built-in.
245@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
246 # Rules shared between *config targets and build targets
247
248 # Basic helpers built in scripts/
249-PHONY += scripts_basic
250-scripts_basic:
251+PHONY += scripts_basic gcc-plugins
252+scripts_basic: gcc-plugins
253 $(Q)$(MAKE) $(build)=scripts/basic
254 $(Q)rm -f .tmp_quiet_recordmcount
255
256@@ -564,6 +565,56 @@ else
257 KBUILD_CFLAGS += -O2
258 endif
259
260+ifndef DISABLE_PAX_PLUGINS
261+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
262+ifneq ($(PLUGINCC),)
263+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
264+ifndef CONFIG_UML
265+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
266+endif
267+endif
268+ifdef CONFIG_PAX_MEMORY_STACKLEAK
269+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
270+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
271+endif
272+ifdef CONFIG_KALLOCSTAT_PLUGIN
273+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
274+endif
275+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
276+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
277+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
278+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
279+endif
280+ifdef CONFIG_CHECKER_PLUGIN
281+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
282+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
283+endif
284+endif
285+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
286+ifdef CONFIG_PAX_SIZE_OVERFLOW
287+SIZE_OVERFLOW_PLUGIN := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
288+endif
289+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
290+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) $(SIZE_OVERFLOW_PLUGIN)
291+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
292+export PLUGINCC CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN SIZE_OVERFLOW_PLUGIN
293+ifeq ($(KBUILD_EXTMOD),)
294+gcc-plugins:
295+ $(Q)$(MAKE) $(build)=tools/gcc
296+else
297+gcc-plugins: ;
298+endif
299+else
300+gcc-plugins:
301+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
302+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
303+else
304+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
305+endif
306+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
307+endif
308+endif
309+
310 include $(srctree)/arch/$(SRCARCH)/Makefile
311
312 ifneq ($(CONFIG_FRAME_WARN),0)
313@@ -708,7 +759,7 @@ export mod_strip_cmd
314
315
316 ifeq ($(KBUILD_EXTMOD),)
317-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
318+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
319
320 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
321 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
322@@ -932,6 +983,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
323
324 # The actual objects are generated when descending,
325 # make sure no implicit rule kicks in
326+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
327+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
328 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
329
330 # Handle descending into subdirectories listed in $(vmlinux-dirs)
331@@ -941,7 +994,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
332 # Error messages still appears in the original language
333
334 PHONY += $(vmlinux-dirs)
335-$(vmlinux-dirs): prepare scripts
336+$(vmlinux-dirs): gcc-plugins prepare scripts
337 $(Q)$(MAKE) $(build)=$@
338
339 # Store (new) KERNELRELASE string in include/config/kernel.release
340@@ -985,6 +1038,7 @@ prepare0: archprepare FORCE
341 $(Q)$(MAKE) $(build)=.
342
343 # All the preparing..
344+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
345 prepare: prepare0
346
347 # Generate some files
348@@ -1092,6 +1146,8 @@ all: modules
349 # using awk while concatenating to the final file.
350
351 PHONY += modules
352+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
353+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
354 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
355 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
356 @$(kecho) ' Building modules, stage 2.';
357@@ -1107,7 +1163,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
358
359 # Target to prepare building external modules
360 PHONY += modules_prepare
361-modules_prepare: prepare scripts
362+modules_prepare: gcc-plugins prepare scripts
363
364 # Target to install modules
365 PHONY += modules_install
366@@ -1204,6 +1260,7 @@ distclean: mrproper
367 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
368 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
369 -o -name '.*.rej' \
370+ -o -name '.*.rej' -o -name '*.so' \
371 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
372 -type f -print | xargs rm -f
373
374@@ -1364,6 +1421,8 @@ PHONY += $(module-dirs) modules
375 $(module-dirs): crmodverdir $(objtree)/Module.symvers
376 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
377
378+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
379+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
380 modules: $(module-dirs)
381 @$(kecho) ' Building modules, stage 2.';
382 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
383@@ -1490,17 +1549,21 @@ else
384 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
385 endif
386
387-%.s: %.c prepare scripts FORCE
388+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
389+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
390+%.s: %.c gcc-plugins prepare scripts FORCE
391 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
392 %.i: %.c prepare scripts FORCE
393 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
394-%.o: %.c prepare scripts FORCE
395+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
396+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
397+%.o: %.c gcc-plugins prepare scripts FORCE
398 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
399 %.lst: %.c prepare scripts FORCE
400 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
401-%.s: %.S prepare scripts FORCE
402+%.s: %.S gcc-plugins prepare scripts FORCE
403 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
404-%.o: %.S prepare scripts FORCE
405+%.o: %.S gcc-plugins prepare scripts FORCE
406 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
407 %.symtypes: %.c prepare scripts FORCE
408 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
409@@ -1510,11 +1573,15 @@ endif
410 $(cmd_crmodverdir)
411 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
412 $(build)=$(build-dir)
413-%/: prepare scripts FORCE
414+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
415+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
416+%/: gcc-plugins prepare scripts FORCE
417 $(cmd_crmodverdir)
418 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
419 $(build)=$(build-dir)
420-%.ko: prepare scripts FORCE
421+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
422+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
423+%.ko: gcc-plugins prepare scripts FORCE
424 $(cmd_crmodverdir)
425 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
426 $(build)=$(build-dir) $(@:.ko=.o)
427diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
428index 3bb7ffe..347a54c 100644
429--- a/arch/alpha/include/asm/atomic.h
430+++ b/arch/alpha/include/asm/atomic.h
431@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
432 #define atomic_dec(v) atomic_sub(1,(v))
433 #define atomic64_dec(v) atomic64_sub(1,(v))
434
435+#define atomic64_read_unchecked(v) atomic64_read(v)
436+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
437+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
438+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
439+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
440+#define atomic64_inc_unchecked(v) atomic64_inc(v)
441+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
442+#define atomic64_dec_unchecked(v) atomic64_dec(v)
443+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
444+
445 #define smp_mb__before_atomic_dec() smp_mb()
446 #define smp_mb__after_atomic_dec() smp_mb()
447 #define smp_mb__before_atomic_inc() smp_mb()
448diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
449index ad368a9..fbe0f25 100644
450--- a/arch/alpha/include/asm/cache.h
451+++ b/arch/alpha/include/asm/cache.h
452@@ -4,19 +4,19 @@
453 #ifndef __ARCH_ALPHA_CACHE_H
454 #define __ARCH_ALPHA_CACHE_H
455
456+#include <linux/const.h>
457
458 /* Bytes per L1 (data) cache line. */
459 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
460-# define L1_CACHE_BYTES 64
461 # define L1_CACHE_SHIFT 6
462 #else
463 /* Both EV4 and EV5 are write-through, read-allocate,
464 direct-mapped, physical.
465 */
466-# define L1_CACHE_BYTES 32
467 # define L1_CACHE_SHIFT 5
468 #endif
469
470+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
471 #define SMP_CACHE_BYTES L1_CACHE_BYTES
472
473 #endif
474diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
475index 968d999..d36b2df 100644
476--- a/arch/alpha/include/asm/elf.h
477+++ b/arch/alpha/include/asm/elf.h
478@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
479
480 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
481
482+#ifdef CONFIG_PAX_ASLR
483+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
484+
485+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
486+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
487+#endif
488+
489 /* $0 is set by ld.so to a pointer to a function which might be
490 registered using atexit. This provides a mean for the dynamic
491 linker to call DT_FINI functions for shared libraries that have
492diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
493index bc2a0da..8ad11ee 100644
494--- a/arch/alpha/include/asm/pgalloc.h
495+++ b/arch/alpha/include/asm/pgalloc.h
496@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
497 pgd_set(pgd, pmd);
498 }
499
500+static inline void
501+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
502+{
503+ pgd_populate(mm, pgd, pmd);
504+}
505+
506 extern pgd_t *pgd_alloc(struct mm_struct *mm);
507
508 static inline void
509diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
510index 81a4342..348b927 100644
511--- a/arch/alpha/include/asm/pgtable.h
512+++ b/arch/alpha/include/asm/pgtable.h
513@@ -102,6 +102,17 @@ struct vm_area_struct;
514 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
515 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
516 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
517+
518+#ifdef CONFIG_PAX_PAGEEXEC
519+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
520+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
521+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
522+#else
523+# define PAGE_SHARED_NOEXEC PAGE_SHARED
524+# define PAGE_COPY_NOEXEC PAGE_COPY
525+# define PAGE_READONLY_NOEXEC PAGE_READONLY
526+#endif
527+
528 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
529
530 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
531diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
532index 2fd00b7..cfd5069 100644
533--- a/arch/alpha/kernel/module.c
534+++ b/arch/alpha/kernel/module.c
535@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
536
537 /* The small sections were sorted to the end of the segment.
538 The following should definitely cover them. */
539- gp = (u64)me->module_core + me->core_size - 0x8000;
540+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
541 got = sechdrs[me->arch.gotsecindex].sh_addr;
542
543 for (i = 0; i < n; i++) {
544diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
545index 49ee319..9ee7d14 100644
546--- a/arch/alpha/kernel/osf_sys.c
547+++ b/arch/alpha/kernel/osf_sys.c
548@@ -1146,7 +1146,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
549 /* At this point: (!vma || addr < vma->vm_end). */
550 if (limit - len < addr)
551 return -ENOMEM;
552- if (!vma || addr + len <= vma->vm_start)
553+ if (check_heap_stack_gap(vma, addr, len))
554 return addr;
555 addr = vma->vm_end;
556 vma = vma->vm_next;
557@@ -1182,6 +1182,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
558 merely specific addresses, but regions of memory -- perhaps
559 this feature should be incorporated into all ports? */
560
561+#ifdef CONFIG_PAX_RANDMMAP
562+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
563+#endif
564+
565 if (addr) {
566 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
567 if (addr != (unsigned long) -ENOMEM)
568@@ -1189,8 +1193,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
569 }
570
571 /* Next, try allocating at TASK_UNMAPPED_BASE. */
572- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
573- len, limit);
574+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
575+
576 if (addr != (unsigned long) -ENOMEM)
577 return addr;
578
579diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
580index 5eecab1..609abc0 100644
581--- a/arch/alpha/mm/fault.c
582+++ b/arch/alpha/mm/fault.c
583@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
584 __reload_thread(pcb);
585 }
586
587+#ifdef CONFIG_PAX_PAGEEXEC
588+/*
589+ * PaX: decide what to do with offenders (regs->pc = fault address)
590+ *
591+ * returns 1 when task should be killed
592+ * 2 when patched PLT trampoline was detected
593+ * 3 when unpatched PLT trampoline was detected
594+ */
595+static int pax_handle_fetch_fault(struct pt_regs *regs)
596+{
597+
598+#ifdef CONFIG_PAX_EMUPLT
599+ int err;
600+
601+ do { /* PaX: patched PLT emulation #1 */
602+ unsigned int ldah, ldq, jmp;
603+
604+ err = get_user(ldah, (unsigned int *)regs->pc);
605+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
606+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
607+
608+ if (err)
609+ break;
610+
611+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
612+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
613+ jmp == 0x6BFB0000U)
614+ {
615+ unsigned long r27, addr;
616+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
617+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
618+
619+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
620+ err = get_user(r27, (unsigned long *)addr);
621+ if (err)
622+ break;
623+
624+ regs->r27 = r27;
625+ regs->pc = r27;
626+ return 2;
627+ }
628+ } while (0);
629+
630+ do { /* PaX: patched PLT emulation #2 */
631+ unsigned int ldah, lda, br;
632+
633+ err = get_user(ldah, (unsigned int *)regs->pc);
634+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
635+ err |= get_user(br, (unsigned int *)(regs->pc+8));
636+
637+ if (err)
638+ break;
639+
640+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
641+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
642+ (br & 0xFFE00000U) == 0xC3E00000U)
643+ {
644+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
645+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
646+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
647+
648+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
649+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
650+ return 2;
651+ }
652+ } while (0);
653+
654+ do { /* PaX: unpatched PLT emulation */
655+ unsigned int br;
656+
657+ err = get_user(br, (unsigned int *)regs->pc);
658+
659+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
660+ unsigned int br2, ldq, nop, jmp;
661+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
662+
663+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
664+ err = get_user(br2, (unsigned int *)addr);
665+ err |= get_user(ldq, (unsigned int *)(addr+4));
666+ err |= get_user(nop, (unsigned int *)(addr+8));
667+ err |= get_user(jmp, (unsigned int *)(addr+12));
668+ err |= get_user(resolver, (unsigned long *)(addr+16));
669+
670+ if (err)
671+ break;
672+
673+ if (br2 == 0xC3600000U &&
674+ ldq == 0xA77B000CU &&
675+ nop == 0x47FF041FU &&
676+ jmp == 0x6B7B0000U)
677+ {
678+ regs->r28 = regs->pc+4;
679+ regs->r27 = addr+16;
680+ regs->pc = resolver;
681+ return 3;
682+ }
683+ }
684+ } while (0);
685+#endif
686+
687+ return 1;
688+}
689+
690+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
691+{
692+ unsigned long i;
693+
694+ printk(KERN_ERR "PAX: bytes at PC: ");
695+ for (i = 0; i < 5; i++) {
696+ unsigned int c;
697+ if (get_user(c, (unsigned int *)pc+i))
698+ printk(KERN_CONT "???????? ");
699+ else
700+ printk(KERN_CONT "%08x ", c);
701+ }
702+ printk("\n");
703+}
704+#endif
705
706 /*
707 * This routine handles page faults. It determines the address,
708@@ -130,8 +248,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
709 good_area:
710 si_code = SEGV_ACCERR;
711 if (cause < 0) {
712- if (!(vma->vm_flags & VM_EXEC))
713+ if (!(vma->vm_flags & VM_EXEC)) {
714+
715+#ifdef CONFIG_PAX_PAGEEXEC
716+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
717+ goto bad_area;
718+
719+ up_read(&mm->mmap_sem);
720+ switch (pax_handle_fetch_fault(regs)) {
721+
722+#ifdef CONFIG_PAX_EMUPLT
723+ case 2:
724+ case 3:
725+ return;
726+#endif
727+
728+ }
729+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
730+ do_group_exit(SIGKILL);
731+#else
732 goto bad_area;
733+#endif
734+
735+ }
736 } else if (!cause) {
737 /* Allow reads even for write-only mappings */
738 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
739diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
740index 68374ba..cff7196 100644
741--- a/arch/arm/include/asm/atomic.h
742+++ b/arch/arm/include/asm/atomic.h
743@@ -17,17 +17,35 @@
744 #include <asm/barrier.h>
745 #include <asm/cmpxchg.h>
746
747+#ifdef CONFIG_GENERIC_ATOMIC64
748+#include <asm-generic/atomic64.h>
749+#endif
750+
751 #define ATOMIC_INIT(i) { (i) }
752
753 #ifdef __KERNEL__
754
755+#define _ASM_EXTABLE(from, to) \
756+" .pushsection __ex_table,\"a\"\n"\
757+" .align 3\n" \
758+" .long " #from ", " #to"\n" \
759+" .popsection"
760+
761 /*
762 * On ARM, ordinary assignment (str instruction) doesn't clear the local
763 * strex/ldrex monitor on some implementations. The reason we can use it for
764 * atomic_set() is the clrex or dummy strex done on every exception return.
765 */
766 #define atomic_read(v) (*(volatile int *)&(v)->counter)
767+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
768+{
769+ return v->counter;
770+}
771 #define atomic_set(v,i) (((v)->counter) = (i))
772+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
773+{
774+ v->counter = i;
775+}
776
777 #if __LINUX_ARM_ARCH__ >= 6
778
779@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
780 int result;
781
782 __asm__ __volatile__("@ atomic_add\n"
783+"1: ldrex %1, [%3]\n"
784+" adds %0, %1, %4\n"
785+
786+#ifdef CONFIG_PAX_REFCOUNT
787+" bvc 3f\n"
788+"2: bkpt 0xf103\n"
789+"3:\n"
790+#endif
791+
792+" strex %1, %0, [%3]\n"
793+" teq %1, #0\n"
794+" bne 1b"
795+
796+#ifdef CONFIG_PAX_REFCOUNT
797+"\n4:\n"
798+ _ASM_EXTABLE(2b, 4b)
799+#endif
800+
801+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
802+ : "r" (&v->counter), "Ir" (i)
803+ : "cc");
804+}
805+
806+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
807+{
808+ unsigned long tmp;
809+ int result;
810+
811+ __asm__ __volatile__("@ atomic_add_unchecked\n"
812 "1: ldrex %0, [%3]\n"
813 " add %0, %0, %4\n"
814 " strex %1, %0, [%3]\n"
815@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
816 smp_mb();
817
818 __asm__ __volatile__("@ atomic_add_return\n"
819+"1: ldrex %1, [%3]\n"
820+" adds %0, %1, %4\n"
821+
822+#ifdef CONFIG_PAX_REFCOUNT
823+" bvc 3f\n"
824+" mov %0, %1\n"
825+"2: bkpt 0xf103\n"
826+"3:\n"
827+#endif
828+
829+" strex %1, %0, [%3]\n"
830+" teq %1, #0\n"
831+" bne 1b"
832+
833+#ifdef CONFIG_PAX_REFCOUNT
834+"\n4:\n"
835+ _ASM_EXTABLE(2b, 4b)
836+#endif
837+
838+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
839+ : "r" (&v->counter), "Ir" (i)
840+ : "cc");
841+
842+ smp_mb();
843+
844+ return result;
845+}
846+
847+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
848+{
849+ unsigned long tmp;
850+ int result;
851+
852+ smp_mb();
853+
854+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
855 "1: ldrex %0, [%3]\n"
856 " add %0, %0, %4\n"
857 " strex %1, %0, [%3]\n"
858@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
859 int result;
860
861 __asm__ __volatile__("@ atomic_sub\n"
862+"1: ldrex %1, [%3]\n"
863+" subs %0, %1, %4\n"
864+
865+#ifdef CONFIG_PAX_REFCOUNT
866+" bvc 3f\n"
867+"2: bkpt 0xf103\n"
868+"3:\n"
869+#endif
870+
871+" strex %1, %0, [%3]\n"
872+" teq %1, #0\n"
873+" bne 1b"
874+
875+#ifdef CONFIG_PAX_REFCOUNT
876+"\n4:\n"
877+ _ASM_EXTABLE(2b, 4b)
878+#endif
879+
880+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
881+ : "r" (&v->counter), "Ir" (i)
882+ : "cc");
883+}
884+
885+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
886+{
887+ unsigned long tmp;
888+ int result;
889+
890+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
891 "1: ldrex %0, [%3]\n"
892 " sub %0, %0, %4\n"
893 " strex %1, %0, [%3]\n"
894@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
895 smp_mb();
896
897 __asm__ __volatile__("@ atomic_sub_return\n"
898-"1: ldrex %0, [%3]\n"
899-" sub %0, %0, %4\n"
900+"1: ldrex %1, [%3]\n"
901+" sub %0, %1, %4\n"
902+
903+#ifdef CONFIG_PAX_REFCOUNT
904+" bvc 3f\n"
905+" mov %0, %1\n"
906+"2: bkpt 0xf103\n"
907+"3:\n"
908+#endif
909+
910 " strex %1, %0, [%3]\n"
911 " teq %1, #0\n"
912 " bne 1b"
913+
914+#ifdef CONFIG_PAX_REFCOUNT
915+"\n4:\n"
916+ _ASM_EXTABLE(2b, 4b)
917+#endif
918+
919 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
920 : "r" (&v->counter), "Ir" (i)
921 : "cc");
922@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
923 return oldval;
924 }
925
926+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
927+{
928+ unsigned long oldval, res;
929+
930+ smp_mb();
931+
932+ do {
933+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
934+ "ldrex %1, [%3]\n"
935+ "mov %0, #0\n"
936+ "teq %1, %4\n"
937+ "strexeq %0, %5, [%3]\n"
938+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
939+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
940+ : "cc");
941+ } while (res);
942+
943+ smp_mb();
944+
945+ return oldval;
946+}
947+
948 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
949 {
950 unsigned long tmp, tmp2;
951@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
952
953 return val;
954 }
955+
956+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
957+{
958+ return atomic_add_return(i, v);
959+}
960+
961 #define atomic_add(i, v) (void) atomic_add_return(i, v)
962+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
963+{
964+ (void) atomic_add_return(i, v);
965+}
966
967 static inline int atomic_sub_return(int i, atomic_t *v)
968 {
969@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
970 return val;
971 }
972 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
973+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
974+{
975+ (void) atomic_sub_return(i, v);
976+}
977
978 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
979 {
980@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
981 return ret;
982 }
983
984+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
985+{
986+ return atomic_cmpxchg(v, old, new);
987+}
988+
989 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
990 {
991 unsigned long flags;
992@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
993 #endif /* __LINUX_ARM_ARCH__ */
994
995 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
996+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
997+{
998+ return xchg(&v->counter, new);
999+}
1000
1001 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1002 {
1003@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1004 }
1005
1006 #define atomic_inc(v) atomic_add(1, v)
1007+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1008+{
1009+ atomic_add_unchecked(1, v);
1010+}
1011 #define atomic_dec(v) atomic_sub(1, v)
1012+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1013+{
1014+ atomic_sub_unchecked(1, v);
1015+}
1016
1017 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1018+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1019+{
1020+ return atomic_add_return_unchecked(1, v) == 0;
1021+}
1022 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1023 #define atomic_inc_return(v) (atomic_add_return(1, v))
1024+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1025+{
1026+ return atomic_add_return_unchecked(1, v);
1027+}
1028 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1029 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1030
1031@@ -241,6 +428,14 @@ typedef struct {
1032 u64 __aligned(8) counter;
1033 } atomic64_t;
1034
1035+#ifdef CONFIG_PAX_REFCOUNT
1036+typedef struct {
1037+ u64 __aligned(8) counter;
1038+} atomic64_unchecked_t;
1039+#else
1040+typedef atomic64_t atomic64_unchecked_t;
1041+#endif
1042+
1043 #define ATOMIC64_INIT(i) { (i) }
1044
1045 static inline u64 atomic64_read(atomic64_t *v)
1046@@ -256,6 +451,19 @@ static inline u64 atomic64_read(atomic64_t *v)
1047 return result;
1048 }
1049
1050+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1051+{
1052+ u64 result;
1053+
1054+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1055+" ldrexd %0, %H0, [%1]"
1056+ : "=&r" (result)
1057+ : "r" (&v->counter), "Qo" (v->counter)
1058+ );
1059+
1060+ return result;
1061+}
1062+
1063 static inline void atomic64_set(atomic64_t *v, u64 i)
1064 {
1065 u64 tmp;
1066@@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1067 : "cc");
1068 }
1069
1070+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1071+{
1072+ u64 tmp;
1073+
1074+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1075+"1: ldrexd %0, %H0, [%2]\n"
1076+" strexd %0, %3, %H3, [%2]\n"
1077+" teq %0, #0\n"
1078+" bne 1b"
1079+ : "=&r" (tmp), "=Qo" (v->counter)
1080+ : "r" (&v->counter), "r" (i)
1081+ : "cc");
1082+}
1083+
1084 static inline void atomic64_add(u64 i, atomic64_t *v)
1085 {
1086 u64 result;
1087@@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1088 __asm__ __volatile__("@ atomic64_add\n"
1089 "1: ldrexd %0, %H0, [%3]\n"
1090 " adds %0, %0, %4\n"
1091+" adcs %H0, %H0, %H4\n"
1092+
1093+#ifdef CONFIG_PAX_REFCOUNT
1094+" bvc 3f\n"
1095+"2: bkpt 0xf103\n"
1096+"3:\n"
1097+#endif
1098+
1099+" strexd %1, %0, %H0, [%3]\n"
1100+" teq %1, #0\n"
1101+" bne 1b"
1102+
1103+#ifdef CONFIG_PAX_REFCOUNT
1104+"\n4:\n"
1105+ _ASM_EXTABLE(2b, 4b)
1106+#endif
1107+
1108+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1109+ : "r" (&v->counter), "r" (i)
1110+ : "cc");
1111+}
1112+
1113+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1114+{
1115+ u64 result;
1116+ unsigned long tmp;
1117+
1118+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1119+"1: ldrexd %0, %H0, [%3]\n"
1120+" adds %0, %0, %4\n"
1121 " adc %H0, %H0, %H4\n"
1122 " strexd %1, %0, %H0, [%3]\n"
1123 " teq %1, #0\n"
1124@@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1125
1126 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1127 {
1128- u64 result;
1129- unsigned long tmp;
1130+ u64 result, tmp;
1131
1132 smp_mb();
1133
1134 __asm__ __volatile__("@ atomic64_add_return\n"
1135+"1: ldrexd %1, %H1, [%3]\n"
1136+" adds %0, %1, %4\n"
1137+" adcs %H0, %H1, %H4\n"
1138+
1139+#ifdef CONFIG_PAX_REFCOUNT
1140+" bvc 3f\n"
1141+" mov %0, %1\n"
1142+" mov %H0, %H1\n"
1143+"2: bkpt 0xf103\n"
1144+"3:\n"
1145+#endif
1146+
1147+" strexd %1, %0, %H0, [%3]\n"
1148+" teq %1, #0\n"
1149+" bne 1b"
1150+
1151+#ifdef CONFIG_PAX_REFCOUNT
1152+"\n4:\n"
1153+ _ASM_EXTABLE(2b, 4b)
1154+#endif
1155+
1156+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1157+ : "r" (&v->counter), "r" (i)
1158+ : "cc");
1159+
1160+ smp_mb();
1161+
1162+ return result;
1163+}
1164+
1165+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1166+{
1167+ u64 result;
1168+ unsigned long tmp;
1169+
1170+ smp_mb();
1171+
1172+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1173 "1: ldrexd %0, %H0, [%3]\n"
1174 " adds %0, %0, %4\n"
1175 " adc %H0, %H0, %H4\n"
1176@@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1177 __asm__ __volatile__("@ atomic64_sub\n"
1178 "1: ldrexd %0, %H0, [%3]\n"
1179 " subs %0, %0, %4\n"
1180+" sbcs %H0, %H0, %H4\n"
1181+
1182+#ifdef CONFIG_PAX_REFCOUNT
1183+" bvc 3f\n"
1184+"2: bkpt 0xf103\n"
1185+"3:\n"
1186+#endif
1187+
1188+" strexd %1, %0, %H0, [%3]\n"
1189+" teq %1, #0\n"
1190+" bne 1b"
1191+
1192+#ifdef CONFIG_PAX_REFCOUNT
1193+"\n4:\n"
1194+ _ASM_EXTABLE(2b, 4b)
1195+#endif
1196+
1197+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1198+ : "r" (&v->counter), "r" (i)
1199+ : "cc");
1200+}
1201+
1202+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1203+{
1204+ u64 result;
1205+ unsigned long tmp;
1206+
1207+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1208+"1: ldrexd %0, %H0, [%3]\n"
1209+" subs %0, %0, %4\n"
1210 " sbc %H0, %H0, %H4\n"
1211 " strexd %1, %0, %H0, [%3]\n"
1212 " teq %1, #0\n"
1213@@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1214
1215 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1216 {
1217- u64 result;
1218- unsigned long tmp;
1219+ u64 result, tmp;
1220
1221 smp_mb();
1222
1223 __asm__ __volatile__("@ atomic64_sub_return\n"
1224-"1: ldrexd %0, %H0, [%3]\n"
1225-" subs %0, %0, %4\n"
1226-" sbc %H0, %H0, %H4\n"
1227+"1: ldrexd %1, %H1, [%3]\n"
1228+" subs %0, %1, %4\n"
1229+" sbc %H0, %H1, %H4\n"
1230+
1231+#ifdef CONFIG_PAX_REFCOUNT
1232+" bvc 3f\n"
1233+" mov %0, %1\n"
1234+" mov %H0, %H1\n"
1235+"2: bkpt 0xf103\n"
1236+"3:\n"
1237+#endif
1238+
1239 " strexd %1, %0, %H0, [%3]\n"
1240 " teq %1, #0\n"
1241 " bne 1b"
1242+
1243+#ifdef CONFIG_PAX_REFCOUNT
1244+"\n4:\n"
1245+ _ASM_EXTABLE(2b, 4b)
1246+#endif
1247+
1248 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1249 : "r" (&v->counter), "r" (i)
1250 : "cc");
1251@@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1252 return oldval;
1253 }
1254
1255+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1256+{
1257+ u64 oldval;
1258+ unsigned long res;
1259+
1260+ smp_mb();
1261+
1262+ do {
1263+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1264+ "ldrexd %1, %H1, [%3]\n"
1265+ "mov %0, #0\n"
1266+ "teq %1, %4\n"
1267+ "teqeq %H1, %H4\n"
1268+ "strexdeq %0, %5, %H5, [%3]"
1269+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1270+ : "r" (&ptr->counter), "r" (old), "r" (new)
1271+ : "cc");
1272+ } while (res);
1273+
1274+ smp_mb();
1275+
1276+ return oldval;
1277+}
1278+
1279 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1280 {
1281 u64 result;
1282@@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1283
1284 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1285 {
1286- u64 result;
1287- unsigned long tmp;
1288+ u64 result, tmp;
1289
1290 smp_mb();
1291
1292 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1293-"1: ldrexd %0, %H0, [%3]\n"
1294-" subs %0, %0, #1\n"
1295-" sbc %H0, %H0, #0\n"
1296+"1: ldrexd %1, %H1, [%3]\n"
1297+" subs %0, %1, #1\n"
1298+" sbc %H0, %H1, #0\n"
1299+
1300+#ifdef CONFIG_PAX_REFCOUNT
1301+" bvc 3f\n"
1302+" mov %0, %1\n"
1303+" mov %H0, %H1\n"
1304+"2: bkpt 0xf103\n"
1305+"3:\n"
1306+#endif
1307+
1308 " teq %H0, #0\n"
1309-" bmi 2f\n"
1310+" bmi 4f\n"
1311 " strexd %1, %0, %H0, [%3]\n"
1312 " teq %1, #0\n"
1313 " bne 1b\n"
1314-"2:"
1315+"4:\n"
1316+
1317+#ifdef CONFIG_PAX_REFCOUNT
1318+ _ASM_EXTABLE(2b, 4b)
1319+#endif
1320+
1321 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1322 : "r" (&v->counter)
1323 : "cc");
1324@@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1325 " teq %0, %5\n"
1326 " teqeq %H0, %H5\n"
1327 " moveq %1, #0\n"
1328-" beq 2f\n"
1329+" beq 4f\n"
1330 " adds %0, %0, %6\n"
1331 " adc %H0, %H0, %H6\n"
1332+
1333+#ifdef CONFIG_PAX_REFCOUNT
1334+" bvc 3f\n"
1335+"2: bkpt 0xf103\n"
1336+"3:\n"
1337+#endif
1338+
1339 " strexd %2, %0, %H0, [%4]\n"
1340 " teq %2, #0\n"
1341 " bne 1b\n"
1342-"2:"
1343+"4:\n"
1344+
1345+#ifdef CONFIG_PAX_REFCOUNT
1346+ _ASM_EXTABLE(2b, 4b)
1347+#endif
1348+
1349 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1350 : "r" (&v->counter), "r" (u), "r" (a)
1351 : "cc");
1352@@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1353
1354 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1355 #define atomic64_inc(v) atomic64_add(1LL, (v))
1356+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1357 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1358+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1359 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1360 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1361 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1362+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1363 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1364 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1365 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1366diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1367index 75fe66b..2255c86 100644
1368--- a/arch/arm/include/asm/cache.h
1369+++ b/arch/arm/include/asm/cache.h
1370@@ -4,8 +4,10 @@
1371 #ifndef __ASMARM_CACHE_H
1372 #define __ASMARM_CACHE_H
1373
1374+#include <linux/const.h>
1375+
1376 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1377-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1378+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1379
1380 /*
1381 * Memory returned by kmalloc() may be used for DMA, so we must make
1382diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1383index 1252a26..9dc17b5 100644
1384--- a/arch/arm/include/asm/cacheflush.h
1385+++ b/arch/arm/include/asm/cacheflush.h
1386@@ -108,7 +108,7 @@ struct cpu_cache_fns {
1387 void (*dma_unmap_area)(const void *, size_t, int);
1388
1389 void (*dma_flush_range)(const void *, const void *);
1390-};
1391+} __no_const;
1392
1393 /*
1394 * Select the calling method
1395diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1396index d41d7cb..9bea5e0 100644
1397--- a/arch/arm/include/asm/cmpxchg.h
1398+++ b/arch/arm/include/asm/cmpxchg.h
1399@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1400
1401 #define xchg(ptr,x) \
1402 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1403+#define xchg_unchecked(ptr,x) \
1404+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1405
1406 #include <asm-generic/cmpxchg-local.h>
1407
1408diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1409index 38050b1..9d90e8b 100644
1410--- a/arch/arm/include/asm/elf.h
1411+++ b/arch/arm/include/asm/elf.h
1412@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1413 the loader. We need to make sure that it is out of the way of the program
1414 that it will "exec", and that there is sufficient room for the brk. */
1415
1416-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1417+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1418+
1419+#ifdef CONFIG_PAX_ASLR
1420+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1421+
1422+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1423+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1424+#endif
1425
1426 /* When the program starts, a1 contains a pointer to a function to be
1427 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1428@@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1429 extern void elf_set_personality(const struct elf32_hdr *);
1430 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1431
1432-struct mm_struct;
1433-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1434-#define arch_randomize_brk arch_randomize_brk
1435-
1436 #endif
1437diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1438index e51b1e8..32a3113 100644
1439--- a/arch/arm/include/asm/kmap_types.h
1440+++ b/arch/arm/include/asm/kmap_types.h
1441@@ -21,6 +21,7 @@ enum km_type {
1442 KM_L1_CACHE,
1443 KM_L2_CACHE,
1444 KM_KDB,
1445+ KM_CLEARPAGE,
1446 KM_TYPE_NR
1447 };
1448
1449diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1450index 53426c6..c7baff3 100644
1451--- a/arch/arm/include/asm/outercache.h
1452+++ b/arch/arm/include/asm/outercache.h
1453@@ -35,7 +35,7 @@ struct outer_cache_fns {
1454 #endif
1455 void (*set_debug)(unsigned long);
1456 void (*resume)(void);
1457-};
1458+} __no_const;
1459
1460 #ifdef CONFIG_OUTER_CACHE
1461
1462diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1463index 5838361..da6e813 100644
1464--- a/arch/arm/include/asm/page.h
1465+++ b/arch/arm/include/asm/page.h
1466@@ -123,7 +123,7 @@ struct cpu_user_fns {
1467 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1468 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1469 unsigned long vaddr, struct vm_area_struct *vma);
1470-};
1471+} __no_const;
1472
1473 #ifdef MULTI_USER
1474 extern struct cpu_user_fns cpu_user;
1475diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1476index 943504f..bf8d667 100644
1477--- a/arch/arm/include/asm/pgalloc.h
1478+++ b/arch/arm/include/asm/pgalloc.h
1479@@ -43,6 +43,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1480 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1481 }
1482
1483+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1484+{
1485+ pud_populate(mm, pud, pmd);
1486+}
1487+
1488 #else /* !CONFIG_ARM_LPAE */
1489
1490 /*
1491@@ -51,6 +56,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1492 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1493 #define pmd_free(mm, pmd) do { } while (0)
1494 #define pud_populate(mm,pmd,pte) BUG()
1495+#define pud_populate_kernel(mm,pmd,pte) BUG()
1496
1497 #endif /* CONFIG_ARM_LPAE */
1498
1499diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
1500index 0f04d84..2be5648 100644
1501--- a/arch/arm/include/asm/thread_info.h
1502+++ b/arch/arm/include/asm/thread_info.h
1503@@ -148,6 +148,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1504 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
1505 #define TIF_SYSCALL_TRACE 8
1506 #define TIF_SYSCALL_AUDIT 9
1507+
1508+/* within 8 bits of TIF_SYSCALL_TRACE
1509+ to meet flexible second operand requirements
1510+*/
1511+#define TIF_GRSEC_SETXID 10
1512+
1513 #define TIF_POLLING_NRFLAG 16
1514 #define TIF_USING_IWMMXT 17
1515 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
1516@@ -163,9 +169,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1517 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
1518 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
1519 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
1520+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
1521
1522 /* Checks for any syscall work in entry-common.S */
1523-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
1524+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
1525+ _TIF_GRSEC_SETXID)
1526
1527 /*
1528 * Change these and you break ASM code in entry-common.S
1529diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1530index 71f6536..602f279 100644
1531--- a/arch/arm/include/asm/uaccess.h
1532+++ b/arch/arm/include/asm/uaccess.h
1533@@ -22,6 +22,8 @@
1534 #define VERIFY_READ 0
1535 #define VERIFY_WRITE 1
1536
1537+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1538+
1539 /*
1540 * The exception table consists of pairs of addresses: the first is the
1541 * address of an instruction that is allowed to fault, and the second is
1542@@ -387,8 +389,23 @@ do { \
1543
1544
1545 #ifdef CONFIG_MMU
1546-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1547-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1548+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1549+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1550+
1551+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1552+{
1553+ if (!__builtin_constant_p(n))
1554+ check_object_size(to, n, false);
1555+ return ___copy_from_user(to, from, n);
1556+}
1557+
1558+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1559+{
1560+ if (!__builtin_constant_p(n))
1561+ check_object_size(from, n, true);
1562+ return ___copy_to_user(to, from, n);
1563+}
1564+
1565 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1566 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1567 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1568@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1569
1570 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1571 {
1572+ if ((long)n < 0)
1573+ return n;
1574+
1575 if (access_ok(VERIFY_READ, from, n))
1576 n = __copy_from_user(to, from, n);
1577 else /* security hole - plug it */
1578@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1579
1580 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1581 {
1582+ if ((long)n < 0)
1583+ return n;
1584+
1585 if (access_ok(VERIFY_WRITE, to, n))
1586 n = __copy_to_user(to, from, n);
1587 return n;
1588diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1589index b57c75e..ed2d6b2 100644
1590--- a/arch/arm/kernel/armksyms.c
1591+++ b/arch/arm/kernel/armksyms.c
1592@@ -94,8 +94,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1593 #ifdef CONFIG_MMU
1594 EXPORT_SYMBOL(copy_page);
1595
1596-EXPORT_SYMBOL(__copy_from_user);
1597-EXPORT_SYMBOL(__copy_to_user);
1598+EXPORT_SYMBOL(___copy_from_user);
1599+EXPORT_SYMBOL(___copy_to_user);
1600 EXPORT_SYMBOL(__clear_user);
1601
1602 EXPORT_SYMBOL(__get_user_1);
1603diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1604index 2b7b017..c380fa2 100644
1605--- a/arch/arm/kernel/process.c
1606+++ b/arch/arm/kernel/process.c
1607@@ -28,7 +28,6 @@
1608 #include <linux/tick.h>
1609 #include <linux/utsname.h>
1610 #include <linux/uaccess.h>
1611-#include <linux/random.h>
1612 #include <linux/hw_breakpoint.h>
1613 #include <linux/cpuidle.h>
1614
1615@@ -275,9 +274,10 @@ void machine_power_off(void)
1616 machine_shutdown();
1617 if (pm_power_off)
1618 pm_power_off();
1619+ BUG();
1620 }
1621
1622-void machine_restart(char *cmd)
1623+__noreturn void machine_restart(char *cmd)
1624 {
1625 machine_shutdown();
1626
1627@@ -519,12 +519,6 @@ unsigned long get_wchan(struct task_struct *p)
1628 return 0;
1629 }
1630
1631-unsigned long arch_randomize_brk(struct mm_struct *mm)
1632-{
1633- unsigned long range_end = mm->brk + 0x02000000;
1634- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1635-}
1636-
1637 #ifdef CONFIG_MMU
1638 /*
1639 * The vectors page is always readable from user space for the
1640diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
1641index 9650c14..ae30cdd 100644
1642--- a/arch/arm/kernel/ptrace.c
1643+++ b/arch/arm/kernel/ptrace.c
1644@@ -906,10 +906,19 @@ long arch_ptrace(struct task_struct *child, long request,
1645 return ret;
1646 }
1647
1648+#ifdef CONFIG_GRKERNSEC_SETXID
1649+extern void gr_delayed_cred_worker(void);
1650+#endif
1651+
1652 asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
1653 {
1654 unsigned long ip;
1655
1656+#ifdef CONFIG_GRKERNSEC_SETXID
1657+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
1658+ gr_delayed_cred_worker();
1659+#endif
1660+
1661 if (why)
1662 audit_syscall_exit(regs);
1663 else
1664diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1665index ebfac78..cbea9c0 100644
1666--- a/arch/arm/kernel/setup.c
1667+++ b/arch/arm/kernel/setup.c
1668@@ -111,13 +111,13 @@ struct processor processor __read_mostly;
1669 struct cpu_tlb_fns cpu_tlb __read_mostly;
1670 #endif
1671 #ifdef MULTI_USER
1672-struct cpu_user_fns cpu_user __read_mostly;
1673+struct cpu_user_fns cpu_user __read_only;
1674 #endif
1675 #ifdef MULTI_CACHE
1676-struct cpu_cache_fns cpu_cache __read_mostly;
1677+struct cpu_cache_fns cpu_cache __read_only;
1678 #endif
1679 #ifdef CONFIG_OUTER_CACHE
1680-struct outer_cache_fns outer_cache __read_mostly;
1681+struct outer_cache_fns outer_cache __read_only;
1682 EXPORT_SYMBOL(outer_cache);
1683 #endif
1684
1685diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1686index 63d402f..db1d714 100644
1687--- a/arch/arm/kernel/traps.c
1688+++ b/arch/arm/kernel/traps.c
1689@@ -264,6 +264,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
1690
1691 static DEFINE_RAW_SPINLOCK(die_lock);
1692
1693+extern void gr_handle_kernel_exploit(void);
1694+
1695 /*
1696 * This function is protected against re-entrancy.
1697 */
1698@@ -296,6 +298,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1699 panic("Fatal exception in interrupt");
1700 if (panic_on_oops)
1701 panic("Fatal exception");
1702+
1703+ gr_handle_kernel_exploit();
1704+
1705 if (ret != NOTIFY_STOP)
1706 do_exit(SIGSEGV);
1707 }
1708diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1709index 66a477a..bee61d3 100644
1710--- a/arch/arm/lib/copy_from_user.S
1711+++ b/arch/arm/lib/copy_from_user.S
1712@@ -16,7 +16,7 @@
1713 /*
1714 * Prototype:
1715 *
1716- * size_t __copy_from_user(void *to, const void *from, size_t n)
1717+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
1718 *
1719 * Purpose:
1720 *
1721@@ -84,11 +84,11 @@
1722
1723 .text
1724
1725-ENTRY(__copy_from_user)
1726+ENTRY(___copy_from_user)
1727
1728 #include "copy_template.S"
1729
1730-ENDPROC(__copy_from_user)
1731+ENDPROC(___copy_from_user)
1732
1733 .pushsection .fixup,"ax"
1734 .align 0
1735diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1736index 6ee2f67..d1cce76 100644
1737--- a/arch/arm/lib/copy_page.S
1738+++ b/arch/arm/lib/copy_page.S
1739@@ -10,6 +10,7 @@
1740 * ASM optimised string functions
1741 */
1742 #include <linux/linkage.h>
1743+#include <linux/const.h>
1744 #include <asm/assembler.h>
1745 #include <asm/asm-offsets.h>
1746 #include <asm/cache.h>
1747diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1748index d066df6..df28194 100644
1749--- a/arch/arm/lib/copy_to_user.S
1750+++ b/arch/arm/lib/copy_to_user.S
1751@@ -16,7 +16,7 @@
1752 /*
1753 * Prototype:
1754 *
1755- * size_t __copy_to_user(void *to, const void *from, size_t n)
1756+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
1757 *
1758 * Purpose:
1759 *
1760@@ -88,11 +88,11 @@
1761 .text
1762
1763 ENTRY(__copy_to_user_std)
1764-WEAK(__copy_to_user)
1765+WEAK(___copy_to_user)
1766
1767 #include "copy_template.S"
1768
1769-ENDPROC(__copy_to_user)
1770+ENDPROC(___copy_to_user)
1771 ENDPROC(__copy_to_user_std)
1772
1773 .pushsection .fixup,"ax"
1774diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1775index 5c908b1..e712687 100644
1776--- a/arch/arm/lib/uaccess.S
1777+++ b/arch/arm/lib/uaccess.S
1778@@ -20,7 +20,7 @@
1779
1780 #define PAGE_SHIFT 12
1781
1782-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1783+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1784 * Purpose : copy a block to user memory from kernel memory
1785 * Params : to - user memory
1786 * : from - kernel memory
1787@@ -40,7 +40,7 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1788 sub r2, r2, ip
1789 b .Lc2u_dest_aligned
1790
1791-ENTRY(__copy_to_user)
1792+ENTRY(___copy_to_user)
1793 stmfd sp!, {r2, r4 - r7, lr}
1794 cmp r2, #4
1795 blt .Lc2u_not_enough
1796@@ -278,14 +278,14 @@ USER( TUSER( strgeb) r3, [r0], #1) @ May fault
1797 ldrgtb r3, [r1], #0
1798 USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1799 b .Lc2u_finished
1800-ENDPROC(__copy_to_user)
1801+ENDPROC(___copy_to_user)
1802
1803 .pushsection .fixup,"ax"
1804 .align 0
1805 9001: ldmfd sp!, {r0, r4 - r7, pc}
1806 .popsection
1807
1808-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1809+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1810 * Purpose : copy a block from user memory to kernel memory
1811 * Params : to - kernel memory
1812 * : from - user memory
1813@@ -304,7 +304,7 @@ USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1814 sub r2, r2, ip
1815 b .Lcfu_dest_aligned
1816
1817-ENTRY(__copy_from_user)
1818+ENTRY(___copy_from_user)
1819 stmfd sp!, {r0, r2, r4 - r7, lr}
1820 cmp r2, #4
1821 blt .Lcfu_not_enough
1822@@ -544,7 +544,7 @@ USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
1823 USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1824 strgtb r3, [r0], #1
1825 b .Lcfu_finished
1826-ENDPROC(__copy_from_user)
1827+ENDPROC(___copy_from_user)
1828
1829 .pushsection .fixup,"ax"
1830 .align 0
1831diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1832index 025f742..8432b08 100644
1833--- a/arch/arm/lib/uaccess_with_memcpy.c
1834+++ b/arch/arm/lib/uaccess_with_memcpy.c
1835@@ -104,7 +104,7 @@ out:
1836 }
1837
1838 unsigned long
1839-__copy_to_user(void __user *to, const void *from, unsigned long n)
1840+___copy_to_user(void __user *to, const void *from, unsigned long n)
1841 {
1842 /*
1843 * This test is stubbed out of the main function above to keep
1844diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
1845index 518091c..eae9a76 100644
1846--- a/arch/arm/mach-omap2/board-n8x0.c
1847+++ b/arch/arm/mach-omap2/board-n8x0.c
1848@@ -596,7 +596,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
1849 }
1850 #endif
1851
1852-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
1853+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
1854 .late_init = n8x0_menelaus_late_init,
1855 };
1856
1857diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1858index 5bb4835..4760f68 100644
1859--- a/arch/arm/mm/fault.c
1860+++ b/arch/arm/mm/fault.c
1861@@ -174,6 +174,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1862 }
1863 #endif
1864
1865+#ifdef CONFIG_PAX_PAGEEXEC
1866+ if (fsr & FSR_LNX_PF) {
1867+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1868+ do_group_exit(SIGKILL);
1869+ }
1870+#endif
1871+
1872 tsk->thread.address = addr;
1873 tsk->thread.error_code = fsr;
1874 tsk->thread.trap_no = 14;
1875@@ -397,6 +404,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1876 }
1877 #endif /* CONFIG_MMU */
1878
1879+#ifdef CONFIG_PAX_PAGEEXEC
1880+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1881+{
1882+ long i;
1883+
1884+ printk(KERN_ERR "PAX: bytes at PC: ");
1885+ for (i = 0; i < 20; i++) {
1886+ unsigned char c;
1887+ if (get_user(c, (__force unsigned char __user *)pc+i))
1888+ printk(KERN_CONT "?? ");
1889+ else
1890+ printk(KERN_CONT "%02x ", c);
1891+ }
1892+ printk("\n");
1893+
1894+ printk(KERN_ERR "PAX: bytes at SP-4: ");
1895+ for (i = -1; i < 20; i++) {
1896+ unsigned long c;
1897+ if (get_user(c, (__force unsigned long __user *)sp+i))
1898+ printk(KERN_CONT "???????? ");
1899+ else
1900+ printk(KERN_CONT "%08lx ", c);
1901+ }
1902+ printk("\n");
1903+}
1904+#endif
1905+
1906 /*
1907 * First Level Translation Fault Handler
1908 *
1909@@ -577,6 +611,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1910 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1911 struct siginfo info;
1912
1913+#ifdef CONFIG_PAX_REFCOUNT
1914+ if (fsr_fs(ifsr) == 2) {
1915+ unsigned int bkpt;
1916+
1917+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1918+ current->thread.error_code = ifsr;
1919+ current->thread.trap_no = 0;
1920+ pax_report_refcount_overflow(regs);
1921+ fixup_exception(regs);
1922+ return;
1923+ }
1924+ }
1925+#endif
1926+
1927 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1928 return;
1929
1930diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1931index ce8cb19..3ec539d 100644
1932--- a/arch/arm/mm/mmap.c
1933+++ b/arch/arm/mm/mmap.c
1934@@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1935 if (len > TASK_SIZE)
1936 return -ENOMEM;
1937
1938+#ifdef CONFIG_PAX_RANDMMAP
1939+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1940+#endif
1941+
1942 if (addr) {
1943 if (do_align)
1944 addr = COLOUR_ALIGN(addr, pgoff);
1945@@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1946 addr = PAGE_ALIGN(addr);
1947
1948 vma = find_vma(mm, addr);
1949- if (TASK_SIZE - len >= addr &&
1950- (!vma || addr + len <= vma->vm_start))
1951+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1952 return addr;
1953 }
1954 if (len > mm->cached_hole_size) {
1955- start_addr = addr = mm->free_area_cache;
1956+ start_addr = addr = mm->free_area_cache;
1957 } else {
1958- start_addr = addr = mm->mmap_base;
1959- mm->cached_hole_size = 0;
1960+ start_addr = addr = mm->mmap_base;
1961+ mm->cached_hole_size = 0;
1962 }
1963
1964 full_search:
1965@@ -124,14 +127,14 @@ full_search:
1966 * Start a new search - just in case we missed
1967 * some holes.
1968 */
1969- if (start_addr != TASK_UNMAPPED_BASE) {
1970- start_addr = addr = TASK_UNMAPPED_BASE;
1971+ if (start_addr != mm->mmap_base) {
1972+ start_addr = addr = mm->mmap_base;
1973 mm->cached_hole_size = 0;
1974 goto full_search;
1975 }
1976 return -ENOMEM;
1977 }
1978- if (!vma || addr + len <= vma->vm_start) {
1979+ if (check_heap_stack_gap(vma, addr, len)) {
1980 /*
1981 * Remember the place where we stopped the search:
1982 */
1983@@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1984
1985 if (mmap_is_legacy()) {
1986 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
1987+
1988+#ifdef CONFIG_PAX_RANDMMAP
1989+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1990+ mm->mmap_base += mm->delta_mmap;
1991+#endif
1992+
1993 mm->get_unmapped_area = arch_get_unmapped_area;
1994 mm->unmap_area = arch_unmap_area;
1995 } else {
1996 mm->mmap_base = mmap_base(random_factor);
1997+
1998+#ifdef CONFIG_PAX_RANDMMAP
1999+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2000+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2001+#endif
2002+
2003 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2004 mm->unmap_area = arch_unmap_area_topdown;
2005 }
2006diff --git a/arch/arm/plat-orion/include/plat/addr-map.h b/arch/arm/plat-orion/include/plat/addr-map.h
2007index fd556f7..af2e7d2 100644
2008--- a/arch/arm/plat-orion/include/plat/addr-map.h
2009+++ b/arch/arm/plat-orion/include/plat/addr-map.h
2010@@ -26,7 +26,7 @@ struct orion_addr_map_cfg {
2011 value in bridge_virt_base */
2012 void __iomem *(*win_cfg_base) (const struct orion_addr_map_cfg *cfg,
2013 const int win);
2014-};
2015+} __no_const;
2016
2017 /*
2018 * Information needed to setup one address mapping.
2019diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
2020index 71a6827..e7fbc23 100644
2021--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
2022+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
2023@@ -43,7 +43,7 @@ struct samsung_dma_ops {
2024 int (*started)(unsigned ch);
2025 int (*flush)(unsigned ch);
2026 int (*stop)(unsigned ch);
2027-};
2028+} __no_const;
2029
2030 extern void *samsung_dmadev_get_ops(void);
2031 extern void *s3c_dma_get_ops(void);
2032diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
2033index 5f28cae..3d23723 100644
2034--- a/arch/arm/plat-samsung/include/plat/ehci.h
2035+++ b/arch/arm/plat-samsung/include/plat/ehci.h
2036@@ -14,7 +14,7 @@
2037 struct s5p_ehci_platdata {
2038 int (*phy_init)(struct platform_device *pdev, int type);
2039 int (*phy_exit)(struct platform_device *pdev, int type);
2040-};
2041+} __no_const;
2042
2043 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
2044
2045diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
2046index c3a58a1..78fbf54 100644
2047--- a/arch/avr32/include/asm/cache.h
2048+++ b/arch/avr32/include/asm/cache.h
2049@@ -1,8 +1,10 @@
2050 #ifndef __ASM_AVR32_CACHE_H
2051 #define __ASM_AVR32_CACHE_H
2052
2053+#include <linux/const.h>
2054+
2055 #define L1_CACHE_SHIFT 5
2056-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2057+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2058
2059 /*
2060 * Memory returned by kmalloc() may be used for DMA, so we must make
2061diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
2062index 3b3159b..425ea94 100644
2063--- a/arch/avr32/include/asm/elf.h
2064+++ b/arch/avr32/include/asm/elf.h
2065@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
2066 the loader. We need to make sure that it is out of the way of the program
2067 that it will "exec", and that there is sufficient room for the brk. */
2068
2069-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2070+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2071
2072+#ifdef CONFIG_PAX_ASLR
2073+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
2074+
2075+#define PAX_DELTA_MMAP_LEN 15
2076+#define PAX_DELTA_STACK_LEN 15
2077+#endif
2078
2079 /* This yields a mask that user programs can use to figure out what
2080 instruction set this CPU supports. This could be done in user space,
2081diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
2082index b7f5c68..556135c 100644
2083--- a/arch/avr32/include/asm/kmap_types.h
2084+++ b/arch/avr32/include/asm/kmap_types.h
2085@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
2086 D(11) KM_IRQ1,
2087 D(12) KM_SOFTIRQ0,
2088 D(13) KM_SOFTIRQ1,
2089-D(14) KM_TYPE_NR
2090+D(14) KM_CLEARPAGE,
2091+D(15) KM_TYPE_NR
2092 };
2093
2094 #undef D
2095diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
2096index f7040a1..db9f300 100644
2097--- a/arch/avr32/mm/fault.c
2098+++ b/arch/avr32/mm/fault.c
2099@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
2100
2101 int exception_trace = 1;
2102
2103+#ifdef CONFIG_PAX_PAGEEXEC
2104+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2105+{
2106+ unsigned long i;
2107+
2108+ printk(KERN_ERR "PAX: bytes at PC: ");
2109+ for (i = 0; i < 20; i++) {
2110+ unsigned char c;
2111+ if (get_user(c, (unsigned char *)pc+i))
2112+ printk(KERN_CONT "???????? ");
2113+ else
2114+ printk(KERN_CONT "%02x ", c);
2115+ }
2116+ printk("\n");
2117+}
2118+#endif
2119+
2120 /*
2121 * This routine handles page faults. It determines the address and the
2122 * problem, and then passes it off to one of the appropriate routines.
2123@@ -156,6 +173,16 @@ bad_area:
2124 up_read(&mm->mmap_sem);
2125
2126 if (user_mode(regs)) {
2127+
2128+#ifdef CONFIG_PAX_PAGEEXEC
2129+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2130+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
2131+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
2132+ do_group_exit(SIGKILL);
2133+ }
2134+ }
2135+#endif
2136+
2137 if (exception_trace && printk_ratelimit())
2138 printk("%s%s[%d]: segfault at %08lx pc %08lx "
2139 "sp %08lx ecr %lu\n",
2140diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
2141index 568885a..f8008df 100644
2142--- a/arch/blackfin/include/asm/cache.h
2143+++ b/arch/blackfin/include/asm/cache.h
2144@@ -7,6 +7,7 @@
2145 #ifndef __ARCH_BLACKFIN_CACHE_H
2146 #define __ARCH_BLACKFIN_CACHE_H
2147
2148+#include <linux/const.h>
2149 #include <linux/linkage.h> /* for asmlinkage */
2150
2151 /*
2152@@ -14,7 +15,7 @@
2153 * Blackfin loads 32 bytes for cache
2154 */
2155 #define L1_CACHE_SHIFT 5
2156-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2157+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2158 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2159
2160 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2161diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
2162index aea2718..3639a60 100644
2163--- a/arch/cris/include/arch-v10/arch/cache.h
2164+++ b/arch/cris/include/arch-v10/arch/cache.h
2165@@ -1,8 +1,9 @@
2166 #ifndef _ASM_ARCH_CACHE_H
2167 #define _ASM_ARCH_CACHE_H
2168
2169+#include <linux/const.h>
2170 /* Etrax 100LX have 32-byte cache-lines. */
2171-#define L1_CACHE_BYTES 32
2172 #define L1_CACHE_SHIFT 5
2173+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2174
2175 #endif /* _ASM_ARCH_CACHE_H */
2176diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2177index 1de779f..336fad3 100644
2178--- a/arch/cris/include/arch-v32/arch/cache.h
2179+++ b/arch/cris/include/arch-v32/arch/cache.h
2180@@ -1,11 +1,12 @@
2181 #ifndef _ASM_CRIS_ARCH_CACHE_H
2182 #define _ASM_CRIS_ARCH_CACHE_H
2183
2184+#include <linux/const.h>
2185 #include <arch/hwregs/dma.h>
2186
2187 /* A cache-line is 32 bytes. */
2188-#define L1_CACHE_BYTES 32
2189 #define L1_CACHE_SHIFT 5
2190+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2191
2192 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
2193
2194diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2195index b86329d..6709906 100644
2196--- a/arch/frv/include/asm/atomic.h
2197+++ b/arch/frv/include/asm/atomic.h
2198@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
2199 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2200 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2201
2202+#define atomic64_read_unchecked(v) atomic64_read(v)
2203+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2204+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2205+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2206+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2207+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2208+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2209+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2210+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2211+
2212 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2213 {
2214 int c, old;
2215diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2216index 2797163..c2a401d 100644
2217--- a/arch/frv/include/asm/cache.h
2218+++ b/arch/frv/include/asm/cache.h
2219@@ -12,10 +12,11 @@
2220 #ifndef __ASM_CACHE_H
2221 #define __ASM_CACHE_H
2222
2223+#include <linux/const.h>
2224
2225 /* bytes per L1 cache line */
2226 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2227-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2228+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2229
2230 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2231 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2232diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2233index f8e16b2..c73ff79 100644
2234--- a/arch/frv/include/asm/kmap_types.h
2235+++ b/arch/frv/include/asm/kmap_types.h
2236@@ -23,6 +23,7 @@ enum km_type {
2237 KM_IRQ1,
2238 KM_SOFTIRQ0,
2239 KM_SOFTIRQ1,
2240+ KM_CLEARPAGE,
2241 KM_TYPE_NR
2242 };
2243
2244diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2245index 385fd30..6c3d97e 100644
2246--- a/arch/frv/mm/elf-fdpic.c
2247+++ b/arch/frv/mm/elf-fdpic.c
2248@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2249 if (addr) {
2250 addr = PAGE_ALIGN(addr);
2251 vma = find_vma(current->mm, addr);
2252- if (TASK_SIZE - len >= addr &&
2253- (!vma || addr + len <= vma->vm_start))
2254+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2255 goto success;
2256 }
2257
2258@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2259 for (; vma; vma = vma->vm_next) {
2260 if (addr > limit)
2261 break;
2262- if (addr + len <= vma->vm_start)
2263+ if (check_heap_stack_gap(vma, addr, len))
2264 goto success;
2265 addr = vma->vm_end;
2266 }
2267@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2268 for (; vma; vma = vma->vm_next) {
2269 if (addr > limit)
2270 break;
2271- if (addr + len <= vma->vm_start)
2272+ if (check_heap_stack_gap(vma, addr, len))
2273 goto success;
2274 addr = vma->vm_end;
2275 }
2276diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2277index c635028..6d9445a 100644
2278--- a/arch/h8300/include/asm/cache.h
2279+++ b/arch/h8300/include/asm/cache.h
2280@@ -1,8 +1,10 @@
2281 #ifndef __ARCH_H8300_CACHE_H
2282 #define __ARCH_H8300_CACHE_H
2283
2284+#include <linux/const.h>
2285+
2286 /* bytes per L1 cache line */
2287-#define L1_CACHE_BYTES 4
2288+#define L1_CACHE_BYTES _AC(4,UL)
2289
2290 /* m68k-elf-gcc 2.95.2 doesn't like these */
2291
2292diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2293index 0f01de2..d37d309 100644
2294--- a/arch/hexagon/include/asm/cache.h
2295+++ b/arch/hexagon/include/asm/cache.h
2296@@ -21,9 +21,11 @@
2297 #ifndef __ASM_CACHE_H
2298 #define __ASM_CACHE_H
2299
2300+#include <linux/const.h>
2301+
2302 /* Bytes per L1 cache line */
2303-#define L1_CACHE_SHIFT (5)
2304-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2305+#define L1_CACHE_SHIFT 5
2306+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2307
2308 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2309 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2310diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2311index 7d91166..88ab87e 100644
2312--- a/arch/ia64/include/asm/atomic.h
2313+++ b/arch/ia64/include/asm/atomic.h
2314@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2315 #define atomic64_inc(v) atomic64_add(1, (v))
2316 #define atomic64_dec(v) atomic64_sub(1, (v))
2317
2318+#define atomic64_read_unchecked(v) atomic64_read(v)
2319+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2320+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2321+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2322+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2323+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2324+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2325+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2326+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2327+
2328 /* Atomic operations are already serializing */
2329 #define smp_mb__before_atomic_dec() barrier()
2330 #define smp_mb__after_atomic_dec() barrier()
2331diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2332index 988254a..e1ee885 100644
2333--- a/arch/ia64/include/asm/cache.h
2334+++ b/arch/ia64/include/asm/cache.h
2335@@ -1,6 +1,7 @@
2336 #ifndef _ASM_IA64_CACHE_H
2337 #define _ASM_IA64_CACHE_H
2338
2339+#include <linux/const.h>
2340
2341 /*
2342 * Copyright (C) 1998-2000 Hewlett-Packard Co
2343@@ -9,7 +10,7 @@
2344
2345 /* Bytes per L1 (data) cache line. */
2346 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2347-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2348+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2349
2350 #ifdef CONFIG_SMP
2351 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2352diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2353index b5298eb..67c6e62 100644
2354--- a/arch/ia64/include/asm/elf.h
2355+++ b/arch/ia64/include/asm/elf.h
2356@@ -42,6 +42,13 @@
2357 */
2358 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2359
2360+#ifdef CONFIG_PAX_ASLR
2361+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2362+
2363+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2364+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2365+#endif
2366+
2367 #define PT_IA_64_UNWIND 0x70000001
2368
2369 /* IA-64 relocations: */
2370diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
2371index 96a8d92..617a1cf 100644
2372--- a/arch/ia64/include/asm/pgalloc.h
2373+++ b/arch/ia64/include/asm/pgalloc.h
2374@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2375 pgd_val(*pgd_entry) = __pa(pud);
2376 }
2377
2378+static inline void
2379+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2380+{
2381+ pgd_populate(mm, pgd_entry, pud);
2382+}
2383+
2384 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
2385 {
2386 return quicklist_alloc(0, GFP_KERNEL, NULL);
2387@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2388 pud_val(*pud_entry) = __pa(pmd);
2389 }
2390
2391+static inline void
2392+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2393+{
2394+ pud_populate(mm, pud_entry, pmd);
2395+}
2396+
2397 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
2398 {
2399 return quicklist_alloc(0, GFP_KERNEL, NULL);
2400diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2401index 815810c..d60bd4c 100644
2402--- a/arch/ia64/include/asm/pgtable.h
2403+++ b/arch/ia64/include/asm/pgtable.h
2404@@ -12,7 +12,7 @@
2405 * David Mosberger-Tang <davidm@hpl.hp.com>
2406 */
2407
2408-
2409+#include <linux/const.h>
2410 #include <asm/mman.h>
2411 #include <asm/page.h>
2412 #include <asm/processor.h>
2413@@ -142,6 +142,17 @@
2414 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2415 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2416 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2417+
2418+#ifdef CONFIG_PAX_PAGEEXEC
2419+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2420+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2421+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2422+#else
2423+# define PAGE_SHARED_NOEXEC PAGE_SHARED
2424+# define PAGE_READONLY_NOEXEC PAGE_READONLY
2425+# define PAGE_COPY_NOEXEC PAGE_COPY
2426+#endif
2427+
2428 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2429 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2430 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2431diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2432index 54ff557..70c88b7 100644
2433--- a/arch/ia64/include/asm/spinlock.h
2434+++ b/arch/ia64/include/asm/spinlock.h
2435@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2436 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2437
2438 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2439- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2440+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2441 }
2442
2443 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
2444diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2445index 449c8c0..432a3d2 100644
2446--- a/arch/ia64/include/asm/uaccess.h
2447+++ b/arch/ia64/include/asm/uaccess.h
2448@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2449 const void *__cu_from = (from); \
2450 long __cu_len = (n); \
2451 \
2452- if (__access_ok(__cu_to, __cu_len, get_fs())) \
2453+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2454 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2455 __cu_len; \
2456 })
2457@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2458 long __cu_len = (n); \
2459 \
2460 __chk_user_ptr(__cu_from); \
2461- if (__access_ok(__cu_from, __cu_len, get_fs())) \
2462+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2463 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2464 __cu_len; \
2465 })
2466diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2467index 24603be..948052d 100644
2468--- a/arch/ia64/kernel/module.c
2469+++ b/arch/ia64/kernel/module.c
2470@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2471 void
2472 module_free (struct module *mod, void *module_region)
2473 {
2474- if (mod && mod->arch.init_unw_table &&
2475- module_region == mod->module_init) {
2476+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2477 unw_remove_unwind_table(mod->arch.init_unw_table);
2478 mod->arch.init_unw_table = NULL;
2479 }
2480@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2481 }
2482
2483 static inline int
2484+in_init_rx (const struct module *mod, uint64_t addr)
2485+{
2486+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2487+}
2488+
2489+static inline int
2490+in_init_rw (const struct module *mod, uint64_t addr)
2491+{
2492+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2493+}
2494+
2495+static inline int
2496 in_init (const struct module *mod, uint64_t addr)
2497 {
2498- return addr - (uint64_t) mod->module_init < mod->init_size;
2499+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2500+}
2501+
2502+static inline int
2503+in_core_rx (const struct module *mod, uint64_t addr)
2504+{
2505+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2506+}
2507+
2508+static inline int
2509+in_core_rw (const struct module *mod, uint64_t addr)
2510+{
2511+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2512 }
2513
2514 static inline int
2515 in_core (const struct module *mod, uint64_t addr)
2516 {
2517- return addr - (uint64_t) mod->module_core < mod->core_size;
2518+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2519 }
2520
2521 static inline int
2522@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2523 break;
2524
2525 case RV_BDREL:
2526- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2527+ if (in_init_rx(mod, val))
2528+ val -= (uint64_t) mod->module_init_rx;
2529+ else if (in_init_rw(mod, val))
2530+ val -= (uint64_t) mod->module_init_rw;
2531+ else if (in_core_rx(mod, val))
2532+ val -= (uint64_t) mod->module_core_rx;
2533+ else if (in_core_rw(mod, val))
2534+ val -= (uint64_t) mod->module_core_rw;
2535 break;
2536
2537 case RV_LTV:
2538@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2539 * addresses have been selected...
2540 */
2541 uint64_t gp;
2542- if (mod->core_size > MAX_LTOFF)
2543+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2544 /*
2545 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2546 * at the end of the module.
2547 */
2548- gp = mod->core_size - MAX_LTOFF / 2;
2549+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2550 else
2551- gp = mod->core_size / 2;
2552- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2553+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2554+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2555 mod->arch.gp = gp;
2556 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2557 }
2558diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2559index 609d500..7dde2a8 100644
2560--- a/arch/ia64/kernel/sys_ia64.c
2561+++ b/arch/ia64/kernel/sys_ia64.c
2562@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2563 if (REGION_NUMBER(addr) == RGN_HPAGE)
2564 addr = 0;
2565 #endif
2566+
2567+#ifdef CONFIG_PAX_RANDMMAP
2568+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2569+ addr = mm->free_area_cache;
2570+ else
2571+#endif
2572+
2573 if (!addr)
2574 addr = mm->free_area_cache;
2575
2576@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2577 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2578 /* At this point: (!vma || addr < vma->vm_end). */
2579 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2580- if (start_addr != TASK_UNMAPPED_BASE) {
2581+ if (start_addr != mm->mmap_base) {
2582 /* Start a new search --- just in case we missed some holes. */
2583- addr = TASK_UNMAPPED_BASE;
2584+ addr = mm->mmap_base;
2585 goto full_search;
2586 }
2587 return -ENOMEM;
2588 }
2589- if (!vma || addr + len <= vma->vm_start) {
2590+ if (check_heap_stack_gap(vma, addr, len)) {
2591 /* Remember the address where we stopped this search: */
2592 mm->free_area_cache = addr + len;
2593 return addr;
2594diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2595index 0ccb28f..8992469 100644
2596--- a/arch/ia64/kernel/vmlinux.lds.S
2597+++ b/arch/ia64/kernel/vmlinux.lds.S
2598@@ -198,7 +198,7 @@ SECTIONS {
2599 /* Per-cpu data: */
2600 . = ALIGN(PERCPU_PAGE_SIZE);
2601 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2602- __phys_per_cpu_start = __per_cpu_load;
2603+ __phys_per_cpu_start = per_cpu_load;
2604 /*
2605 * ensure percpu data fits
2606 * into percpu page size
2607diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2608index 02d29c2..ea893df 100644
2609--- a/arch/ia64/mm/fault.c
2610+++ b/arch/ia64/mm/fault.c
2611@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
2612 return pte_present(pte);
2613 }
2614
2615+#ifdef CONFIG_PAX_PAGEEXEC
2616+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2617+{
2618+ unsigned long i;
2619+
2620+ printk(KERN_ERR "PAX: bytes at PC: ");
2621+ for (i = 0; i < 8; i++) {
2622+ unsigned int c;
2623+ if (get_user(c, (unsigned int *)pc+i))
2624+ printk(KERN_CONT "???????? ");
2625+ else
2626+ printk(KERN_CONT "%08x ", c);
2627+ }
2628+ printk("\n");
2629+}
2630+#endif
2631+
2632 void __kprobes
2633 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2634 {
2635@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2636 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2637 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2638
2639- if ((vma->vm_flags & mask) != mask)
2640+ if ((vma->vm_flags & mask) != mask) {
2641+
2642+#ifdef CONFIG_PAX_PAGEEXEC
2643+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2644+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2645+ goto bad_area;
2646+
2647+ up_read(&mm->mmap_sem);
2648+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2649+ do_group_exit(SIGKILL);
2650+ }
2651+#endif
2652+
2653 goto bad_area;
2654
2655+ }
2656+
2657 /*
2658 * If for any reason at all we couldn't handle the fault, make
2659 * sure we exit gracefully rather than endlessly redo the
2660diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2661index 5ca674b..e0e1b70 100644
2662--- a/arch/ia64/mm/hugetlbpage.c
2663+++ b/arch/ia64/mm/hugetlbpage.c
2664@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2665 /* At this point: (!vmm || addr < vmm->vm_end). */
2666 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2667 return -ENOMEM;
2668- if (!vmm || (addr + len) <= vmm->vm_start)
2669+ if (check_heap_stack_gap(vmm, addr, len))
2670 return addr;
2671 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2672 }
2673diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2674index 0eab454..bd794f2 100644
2675--- a/arch/ia64/mm/init.c
2676+++ b/arch/ia64/mm/init.c
2677@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
2678 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2679 vma->vm_end = vma->vm_start + PAGE_SIZE;
2680 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2681+
2682+#ifdef CONFIG_PAX_PAGEEXEC
2683+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2684+ vma->vm_flags &= ~VM_EXEC;
2685+
2686+#ifdef CONFIG_PAX_MPROTECT
2687+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
2688+ vma->vm_flags &= ~VM_MAYEXEC;
2689+#endif
2690+
2691+ }
2692+#endif
2693+
2694 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2695 down_write(&current->mm->mmap_sem);
2696 if (insert_vm_struct(current->mm, vma)) {
2697diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2698index 40b3ee9..8c2c112 100644
2699--- a/arch/m32r/include/asm/cache.h
2700+++ b/arch/m32r/include/asm/cache.h
2701@@ -1,8 +1,10 @@
2702 #ifndef _ASM_M32R_CACHE_H
2703 #define _ASM_M32R_CACHE_H
2704
2705+#include <linux/const.h>
2706+
2707 /* L1 cache line size */
2708 #define L1_CACHE_SHIFT 4
2709-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2710+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2711
2712 #endif /* _ASM_M32R_CACHE_H */
2713diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2714index 82abd15..d95ae5d 100644
2715--- a/arch/m32r/lib/usercopy.c
2716+++ b/arch/m32r/lib/usercopy.c
2717@@ -14,6 +14,9 @@
2718 unsigned long
2719 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2720 {
2721+ if ((long)n < 0)
2722+ return n;
2723+
2724 prefetch(from);
2725 if (access_ok(VERIFY_WRITE, to, n))
2726 __copy_user(to,from,n);
2727@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2728 unsigned long
2729 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2730 {
2731+ if ((long)n < 0)
2732+ return n;
2733+
2734 prefetchw(to);
2735 if (access_ok(VERIFY_READ, from, n))
2736 __copy_user_zeroing(to,from,n);
2737diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2738index 0395c51..5f26031 100644
2739--- a/arch/m68k/include/asm/cache.h
2740+++ b/arch/m68k/include/asm/cache.h
2741@@ -4,9 +4,11 @@
2742 #ifndef __ARCH_M68K_CACHE_H
2743 #define __ARCH_M68K_CACHE_H
2744
2745+#include <linux/const.h>
2746+
2747 /* bytes per L1 cache line */
2748 #define L1_CACHE_SHIFT 4
2749-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2750+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2751
2752 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2753
2754diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2755index 4efe96a..60e8699 100644
2756--- a/arch/microblaze/include/asm/cache.h
2757+++ b/arch/microblaze/include/asm/cache.h
2758@@ -13,11 +13,12 @@
2759 #ifndef _ASM_MICROBLAZE_CACHE_H
2760 #define _ASM_MICROBLAZE_CACHE_H
2761
2762+#include <linux/const.h>
2763 #include <asm/registers.h>
2764
2765 #define L1_CACHE_SHIFT 5
2766 /* word-granular cache in microblaze */
2767-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2768+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2769
2770 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2771
2772diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2773index 3f4c5cb..3439c6e 100644
2774--- a/arch/mips/include/asm/atomic.h
2775+++ b/arch/mips/include/asm/atomic.h
2776@@ -21,6 +21,10 @@
2777 #include <asm/cmpxchg.h>
2778 #include <asm/war.h>
2779
2780+#ifdef CONFIG_GENERIC_ATOMIC64
2781+#include <asm-generic/atomic64.h>
2782+#endif
2783+
2784 #define ATOMIC_INIT(i) { (i) }
2785
2786 /*
2787@@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2788 */
2789 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2790
2791+#define atomic64_read_unchecked(v) atomic64_read(v)
2792+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2793+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2794+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2795+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2796+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2797+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2798+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2799+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2800+
2801 #endif /* CONFIG_64BIT */
2802
2803 /*
2804diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2805index b4db69f..8f3b093 100644
2806--- a/arch/mips/include/asm/cache.h
2807+++ b/arch/mips/include/asm/cache.h
2808@@ -9,10 +9,11 @@
2809 #ifndef _ASM_CACHE_H
2810 #define _ASM_CACHE_H
2811
2812+#include <linux/const.h>
2813 #include <kmalloc.h>
2814
2815 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2816-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2817+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2818
2819 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2820 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2821diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2822index 455c0ac..ad65fbe 100644
2823--- a/arch/mips/include/asm/elf.h
2824+++ b/arch/mips/include/asm/elf.h
2825@@ -372,13 +372,16 @@ extern const char *__elf_platform;
2826 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2827 #endif
2828
2829+#ifdef CONFIG_PAX_ASLR
2830+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2831+
2832+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2833+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2834+#endif
2835+
2836 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2837 struct linux_binprm;
2838 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2839 int uses_interp);
2840
2841-struct mm_struct;
2842-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2843-#define arch_randomize_brk arch_randomize_brk
2844-
2845 #endif /* _ASM_ELF_H */
2846diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
2847index c1f6afa..38cc6e9 100644
2848--- a/arch/mips/include/asm/exec.h
2849+++ b/arch/mips/include/asm/exec.h
2850@@ -12,6 +12,6 @@
2851 #ifndef _ASM_EXEC_H
2852 #define _ASM_EXEC_H
2853
2854-extern unsigned long arch_align_stack(unsigned long sp);
2855+#define arch_align_stack(x) ((x) & ~0xfUL)
2856
2857 #endif /* _ASM_EXEC_H */
2858diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2859index da9bd7d..91aa7ab 100644
2860--- a/arch/mips/include/asm/page.h
2861+++ b/arch/mips/include/asm/page.h
2862@@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2863 #ifdef CONFIG_CPU_MIPS32
2864 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2865 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2866- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2867+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2868 #else
2869 typedef struct { unsigned long long pte; } pte_t;
2870 #define pte_val(x) ((x).pte)
2871diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
2872index 881d18b..cea38bc 100644
2873--- a/arch/mips/include/asm/pgalloc.h
2874+++ b/arch/mips/include/asm/pgalloc.h
2875@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2876 {
2877 set_pud(pud, __pud((unsigned long)pmd));
2878 }
2879+
2880+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2881+{
2882+ pud_populate(mm, pud, pmd);
2883+}
2884 #endif
2885
2886 /*
2887diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
2888index 0d85d8e..ec71487 100644
2889--- a/arch/mips/include/asm/thread_info.h
2890+++ b/arch/mips/include/asm/thread_info.h
2891@@ -123,6 +123,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
2892 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
2893 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
2894 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
2895+/* li takes a 32bit immediate */
2896+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
2897 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
2898
2899 #ifdef CONFIG_MIPS32_O32
2900@@ -146,15 +148,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
2901 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
2902 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
2903 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
2904+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
2905+
2906+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2907
2908 /* work to do in syscall_trace_leave() */
2909-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
2910+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2911
2912 /* work to do on interrupt/exception return */
2913 #define _TIF_WORK_MASK (0x0000ffef & \
2914 ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
2915 /* work to do on any return to u-space */
2916-#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
2917+#define _TIF_ALLWORK_MASK ((0x8000ffff & ~_TIF_SECCOMP) | _TIF_GRSEC_SETXID)
2918
2919 #endif /* __KERNEL__ */
2920
2921diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2922index 9fdd8bc..4bd7f1a 100644
2923--- a/arch/mips/kernel/binfmt_elfn32.c
2924+++ b/arch/mips/kernel/binfmt_elfn32.c
2925@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2926 #undef ELF_ET_DYN_BASE
2927 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2928
2929+#ifdef CONFIG_PAX_ASLR
2930+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2931+
2932+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2933+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2934+#endif
2935+
2936 #include <asm/processor.h>
2937 #include <linux/module.h>
2938 #include <linux/elfcore.h>
2939diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2940index ff44823..97f8906 100644
2941--- a/arch/mips/kernel/binfmt_elfo32.c
2942+++ b/arch/mips/kernel/binfmt_elfo32.c
2943@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2944 #undef ELF_ET_DYN_BASE
2945 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2946
2947+#ifdef CONFIG_PAX_ASLR
2948+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2949+
2950+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2951+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2952+#endif
2953+
2954 #include <asm/processor.h>
2955
2956 /*
2957diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2958index e9a5fd7..378809a 100644
2959--- a/arch/mips/kernel/process.c
2960+++ b/arch/mips/kernel/process.c
2961@@ -480,15 +480,3 @@ unsigned long get_wchan(struct task_struct *task)
2962 out:
2963 return pc;
2964 }
2965-
2966-/*
2967- * Don't forget that the stack pointer must be aligned on a 8 bytes
2968- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2969- */
2970-unsigned long arch_align_stack(unsigned long sp)
2971-{
2972- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2973- sp -= get_random_int() & ~PAGE_MASK;
2974-
2975- return sp & ALMASK;
2976-}
2977diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
2978index 7c24c29..e2f1981 100644
2979--- a/arch/mips/kernel/ptrace.c
2980+++ b/arch/mips/kernel/ptrace.c
2981@@ -528,6 +528,10 @@ static inline int audit_arch(void)
2982 return arch;
2983 }
2984
2985+#ifdef CONFIG_GRKERNSEC_SETXID
2986+extern void gr_delayed_cred_worker(void);
2987+#endif
2988+
2989 /*
2990 * Notification of system call entry/exit
2991 * - triggered by current->work.syscall_trace
2992@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
2993 /* do the secure computing check first */
2994 secure_computing(regs->regs[2]);
2995
2996+#ifdef CONFIG_GRKERNSEC_SETXID
2997+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2998+ gr_delayed_cred_worker();
2999+#endif
3000+
3001 if (!(current->ptrace & PT_PTRACED))
3002 goto out;
3003
3004diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
3005index a632bc1..0b77c7c 100644
3006--- a/arch/mips/kernel/scall32-o32.S
3007+++ b/arch/mips/kernel/scall32-o32.S
3008@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3009
3010 stack_done:
3011 lw t0, TI_FLAGS($28) # syscall tracing enabled?
3012- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3013+ li t1, _TIF_SYSCALL_WORK
3014 and t0, t1
3015 bnez t0, syscall_trace_entry # -> yes
3016
3017diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
3018index 3b5a5e9..e1ee86d 100644
3019--- a/arch/mips/kernel/scall64-64.S
3020+++ b/arch/mips/kernel/scall64-64.S
3021@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
3022
3023 sd a3, PT_R26(sp) # save a3 for syscall restarting
3024
3025- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3026+ li t1, _TIF_SYSCALL_WORK
3027 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3028 and t0, t1, t0
3029 bnez t0, syscall_trace_entry
3030diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
3031index 6be6f70..1859577 100644
3032--- a/arch/mips/kernel/scall64-n32.S
3033+++ b/arch/mips/kernel/scall64-n32.S
3034@@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
3035
3036 sd a3, PT_R26(sp) # save a3 for syscall restarting
3037
3038- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3039+ li t1, _TIF_SYSCALL_WORK
3040 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3041 and t0, t1, t0
3042 bnez t0, n32_syscall_trace_entry
3043diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
3044index 5422855..74e63a3 100644
3045--- a/arch/mips/kernel/scall64-o32.S
3046+++ b/arch/mips/kernel/scall64-o32.S
3047@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3048 PTR 4b, bad_stack
3049 .previous
3050
3051- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3052+ li t1, _TIF_SYSCALL_WORK
3053 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3054 and t0, t1, t0
3055 bnez t0, trace_a_syscall
3056diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
3057index c14f6df..537e729 100644
3058--- a/arch/mips/mm/fault.c
3059+++ b/arch/mips/mm/fault.c
3060@@ -27,6 +27,23 @@
3061 #include <asm/highmem.h> /* For VMALLOC_END */
3062 #include <linux/kdebug.h>
3063
3064+#ifdef CONFIG_PAX_PAGEEXEC
3065+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3066+{
3067+ unsigned long i;
3068+
3069+ printk(KERN_ERR "PAX: bytes at PC: ");
3070+ for (i = 0; i < 5; i++) {
3071+ unsigned int c;
3072+ if (get_user(c, (unsigned int *)pc+i))
3073+ printk(KERN_CONT "???????? ");
3074+ else
3075+ printk(KERN_CONT "%08x ", c);
3076+ }
3077+ printk("\n");
3078+}
3079+#endif
3080+
3081 /*
3082 * This routine handles page faults. It determines the address,
3083 * and the problem, and then passes it off to one of the appropriate
3084diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
3085index 302d779..7d35bf8 100644
3086--- a/arch/mips/mm/mmap.c
3087+++ b/arch/mips/mm/mmap.c
3088@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3089 do_color_align = 1;
3090
3091 /* requesting a specific address */
3092+
3093+#ifdef CONFIG_PAX_RANDMMAP
3094+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
3095+#endif
3096+
3097 if (addr) {
3098 if (do_color_align)
3099 addr = COLOUR_ALIGN(addr, pgoff);
3100@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3101 addr = PAGE_ALIGN(addr);
3102
3103 vma = find_vma(mm, addr);
3104- if (TASK_SIZE - len >= addr &&
3105- (!vma || addr + len <= vma->vm_start))
3106+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3107 return addr;
3108 }
3109
3110@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3111 /* At this point: (!vma || addr < vma->vm_end). */
3112 if (TASK_SIZE - len < addr)
3113 return -ENOMEM;
3114- if (!vma || addr + len <= vma->vm_start)
3115+ if (check_heap_stack_gap(vma, addr, len))
3116 return addr;
3117 addr = vma->vm_end;
3118 if (do_color_align)
3119@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3120 /* make sure it can fit in the remaining address space */
3121 if (likely(addr > len)) {
3122 vma = find_vma(mm, addr - len);
3123- if (!vma || addr <= vma->vm_start) {
3124+ if (check_heap_stack_gap(vma, addr - len, len)) {
3125 /* cache the address as a hint for next time */
3126 return mm->free_area_cache = addr - len;
3127 }
3128@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3129 * return with success:
3130 */
3131 vma = find_vma(mm, addr);
3132- if (likely(!vma || addr + len <= vma->vm_start)) {
3133+ if (check_heap_stack_gap(vma, addr, len)) {
3134 /* cache the address as a hint for next time */
3135 return mm->free_area_cache = addr;
3136 }
3137@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3138 mm->unmap_area = arch_unmap_area_topdown;
3139 }
3140 }
3141-
3142-static inline unsigned long brk_rnd(void)
3143-{
3144- unsigned long rnd = get_random_int();
3145-
3146- rnd = rnd << PAGE_SHIFT;
3147- /* 8MB for 32bit, 256MB for 64bit */
3148- if (TASK_IS_32BIT_ADDR)
3149- rnd = rnd & 0x7ffffful;
3150- else
3151- rnd = rnd & 0xffffffful;
3152-
3153- return rnd;
3154-}
3155-
3156-unsigned long arch_randomize_brk(struct mm_struct *mm)
3157-{
3158- unsigned long base = mm->brk;
3159- unsigned long ret;
3160-
3161- ret = PAGE_ALIGN(base + brk_rnd());
3162-
3163- if (ret < mm->brk)
3164- return mm->brk;
3165-
3166- return ret;
3167-}
3168diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3169index 967d144..db12197 100644
3170--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
3171+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3172@@ -11,12 +11,14 @@
3173 #ifndef _ASM_PROC_CACHE_H
3174 #define _ASM_PROC_CACHE_H
3175
3176+#include <linux/const.h>
3177+
3178 /* L1 cache */
3179
3180 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3181 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
3182-#define L1_CACHE_BYTES 16 /* bytes per entry */
3183 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
3184+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3185 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
3186
3187 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3188diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3189index bcb5df2..84fabd2 100644
3190--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3191+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3192@@ -16,13 +16,15 @@
3193 #ifndef _ASM_PROC_CACHE_H
3194 #define _ASM_PROC_CACHE_H
3195
3196+#include <linux/const.h>
3197+
3198 /*
3199 * L1 cache
3200 */
3201 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3202 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
3203-#define L1_CACHE_BYTES 32 /* bytes per entry */
3204 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
3205+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3206 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
3207
3208 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3209diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
3210index 4ce7a01..449202a 100644
3211--- a/arch/openrisc/include/asm/cache.h
3212+++ b/arch/openrisc/include/asm/cache.h
3213@@ -19,11 +19,13 @@
3214 #ifndef __ASM_OPENRISC_CACHE_H
3215 #define __ASM_OPENRISC_CACHE_H
3216
3217+#include <linux/const.h>
3218+
3219 /* FIXME: How can we replace these with values from the CPU...
3220 * they shouldn't be hard-coded!
3221 */
3222
3223-#define L1_CACHE_BYTES 16
3224 #define L1_CACHE_SHIFT 4
3225+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3226
3227 #endif /* __ASM_OPENRISC_CACHE_H */
3228diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
3229index 6c6defc..d30653d 100644
3230--- a/arch/parisc/include/asm/atomic.h
3231+++ b/arch/parisc/include/asm/atomic.h
3232@@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3233
3234 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3235
3236+#define atomic64_read_unchecked(v) atomic64_read(v)
3237+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3238+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3239+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3240+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3241+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3242+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3243+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3244+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3245+
3246 #endif /* !CONFIG_64BIT */
3247
3248
3249diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
3250index 47f11c7..3420df2 100644
3251--- a/arch/parisc/include/asm/cache.h
3252+++ b/arch/parisc/include/asm/cache.h
3253@@ -5,6 +5,7 @@
3254 #ifndef __ARCH_PARISC_CACHE_H
3255 #define __ARCH_PARISC_CACHE_H
3256
3257+#include <linux/const.h>
3258
3259 /*
3260 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
3261@@ -15,13 +16,13 @@
3262 * just ruin performance.
3263 */
3264 #ifdef CONFIG_PA20
3265-#define L1_CACHE_BYTES 64
3266 #define L1_CACHE_SHIFT 6
3267 #else
3268-#define L1_CACHE_BYTES 32
3269 #define L1_CACHE_SHIFT 5
3270 #endif
3271
3272+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3273+
3274 #ifndef __ASSEMBLY__
3275
3276 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3277diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
3278index 19f6cb1..6c78cf2 100644
3279--- a/arch/parisc/include/asm/elf.h
3280+++ b/arch/parisc/include/asm/elf.h
3281@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
3282
3283 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
3284
3285+#ifdef CONFIG_PAX_ASLR
3286+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3287+
3288+#define PAX_DELTA_MMAP_LEN 16
3289+#define PAX_DELTA_STACK_LEN 16
3290+#endif
3291+
3292 /* This yields a mask that user programs can use to figure out what
3293 instruction set this CPU supports. This could be done in user space,
3294 but it's not easy, and we've already done it here. */
3295diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
3296index fc987a1..6e068ef 100644
3297--- a/arch/parisc/include/asm/pgalloc.h
3298+++ b/arch/parisc/include/asm/pgalloc.h
3299@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3300 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
3301 }
3302
3303+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3304+{
3305+ pgd_populate(mm, pgd, pmd);
3306+}
3307+
3308 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
3309 {
3310 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
3311@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
3312 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
3313 #define pmd_free(mm, x) do { } while (0)
3314 #define pgd_populate(mm, pmd, pte) BUG()
3315+#define pgd_populate_kernel(mm, pmd, pte) BUG()
3316
3317 #endif
3318
3319diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
3320index ee99f23..802b0a1 100644
3321--- a/arch/parisc/include/asm/pgtable.h
3322+++ b/arch/parisc/include/asm/pgtable.h
3323@@ -212,6 +212,17 @@ struct vm_area_struct;
3324 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3325 #define PAGE_COPY PAGE_EXECREAD
3326 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3327+
3328+#ifdef CONFIG_PAX_PAGEEXEC
3329+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3330+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3331+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3332+#else
3333+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3334+# define PAGE_COPY_NOEXEC PAGE_COPY
3335+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3336+#endif
3337+
3338 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3339 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
3340 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
3341diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
3342index 9ac0660..6ed15c4 100644
3343--- a/arch/parisc/include/asm/uaccess.h
3344+++ b/arch/parisc/include/asm/uaccess.h
3345@@ -252,10 +252,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
3346 const void __user *from,
3347 unsigned long n)
3348 {
3349- int sz = __compiletime_object_size(to);
3350+ size_t sz = __compiletime_object_size(to);
3351 int ret = -EFAULT;
3352
3353- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
3354+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
3355 ret = __copy_from_user(to, from, n);
3356 else
3357 copy_from_user_overflow();
3358diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3359index 5e34ccf..672bc9c 100644
3360--- a/arch/parisc/kernel/module.c
3361+++ b/arch/parisc/kernel/module.c
3362@@ -98,16 +98,38 @@
3363
3364 /* three functions to determine where in the module core
3365 * or init pieces the location is */
3366+static inline int in_init_rx(struct module *me, void *loc)
3367+{
3368+ return (loc >= me->module_init_rx &&
3369+ loc < (me->module_init_rx + me->init_size_rx));
3370+}
3371+
3372+static inline int in_init_rw(struct module *me, void *loc)
3373+{
3374+ return (loc >= me->module_init_rw &&
3375+ loc < (me->module_init_rw + me->init_size_rw));
3376+}
3377+
3378 static inline int in_init(struct module *me, void *loc)
3379 {
3380- return (loc >= me->module_init &&
3381- loc <= (me->module_init + me->init_size));
3382+ return in_init_rx(me, loc) || in_init_rw(me, loc);
3383+}
3384+
3385+static inline int in_core_rx(struct module *me, void *loc)
3386+{
3387+ return (loc >= me->module_core_rx &&
3388+ loc < (me->module_core_rx + me->core_size_rx));
3389+}
3390+
3391+static inline int in_core_rw(struct module *me, void *loc)
3392+{
3393+ return (loc >= me->module_core_rw &&
3394+ loc < (me->module_core_rw + me->core_size_rw));
3395 }
3396
3397 static inline int in_core(struct module *me, void *loc)
3398 {
3399- return (loc >= me->module_core &&
3400- loc <= (me->module_core + me->core_size));
3401+ return in_core_rx(me, loc) || in_core_rw(me, loc);
3402 }
3403
3404 static inline int in_local(struct module *me, void *loc)
3405@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3406 }
3407
3408 /* align things a bit */
3409- me->core_size = ALIGN(me->core_size, 16);
3410- me->arch.got_offset = me->core_size;
3411- me->core_size += gots * sizeof(struct got_entry);
3412+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3413+ me->arch.got_offset = me->core_size_rw;
3414+ me->core_size_rw += gots * sizeof(struct got_entry);
3415
3416- me->core_size = ALIGN(me->core_size, 16);
3417- me->arch.fdesc_offset = me->core_size;
3418- me->core_size += fdescs * sizeof(Elf_Fdesc);
3419+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3420+ me->arch.fdesc_offset = me->core_size_rw;
3421+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3422
3423 me->arch.got_max = gots;
3424 me->arch.fdesc_max = fdescs;
3425@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3426
3427 BUG_ON(value == 0);
3428
3429- got = me->module_core + me->arch.got_offset;
3430+ got = me->module_core_rw + me->arch.got_offset;
3431 for (i = 0; got[i].addr; i++)
3432 if (got[i].addr == value)
3433 goto out;
3434@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3435 #ifdef CONFIG_64BIT
3436 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3437 {
3438- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3439+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3440
3441 if (!value) {
3442 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3443@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3444
3445 /* Create new one */
3446 fdesc->addr = value;
3447- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3448+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3449 return (Elf_Addr)fdesc;
3450 }
3451 #endif /* CONFIG_64BIT */
3452@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
3453
3454 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3455 end = table + sechdrs[me->arch.unwind_section].sh_size;
3456- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3457+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3458
3459 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3460 me->arch.unwind_section, table, end, gp);
3461diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3462index c9b9322..02d8940 100644
3463--- a/arch/parisc/kernel/sys_parisc.c
3464+++ b/arch/parisc/kernel/sys_parisc.c
3465@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3466 /* At this point: (!vma || addr < vma->vm_end). */
3467 if (TASK_SIZE - len < addr)
3468 return -ENOMEM;
3469- if (!vma || addr + len <= vma->vm_start)
3470+ if (check_heap_stack_gap(vma, addr, len))
3471 return addr;
3472 addr = vma->vm_end;
3473 }
3474@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3475 /* At this point: (!vma || addr < vma->vm_end). */
3476 if (TASK_SIZE - len < addr)
3477 return -ENOMEM;
3478- if (!vma || addr + len <= vma->vm_start)
3479+ if (check_heap_stack_gap(vma, addr, len))
3480 return addr;
3481 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3482 if (addr < vma->vm_end) /* handle wraparound */
3483@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3484 if (flags & MAP_FIXED)
3485 return addr;
3486 if (!addr)
3487- addr = TASK_UNMAPPED_BASE;
3488+ addr = current->mm->mmap_base;
3489
3490 if (filp) {
3491 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3492diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3493index 45ba99f..8e22c33 100644
3494--- a/arch/parisc/kernel/traps.c
3495+++ b/arch/parisc/kernel/traps.c
3496@@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3497
3498 down_read(&current->mm->mmap_sem);
3499 vma = find_vma(current->mm,regs->iaoq[0]);
3500- if (vma && (regs->iaoq[0] >= vma->vm_start)
3501- && (vma->vm_flags & VM_EXEC)) {
3502-
3503+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3504 fault_address = regs->iaoq[0];
3505 fault_space = regs->iasq[0];
3506
3507diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3508index 18162ce..94de376 100644
3509--- a/arch/parisc/mm/fault.c
3510+++ b/arch/parisc/mm/fault.c
3511@@ -15,6 +15,7 @@
3512 #include <linux/sched.h>
3513 #include <linux/interrupt.h>
3514 #include <linux/module.h>
3515+#include <linux/unistd.h>
3516
3517 #include <asm/uaccess.h>
3518 #include <asm/traps.h>
3519@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3520 static unsigned long
3521 parisc_acctyp(unsigned long code, unsigned int inst)
3522 {
3523- if (code == 6 || code == 16)
3524+ if (code == 6 || code == 7 || code == 16)
3525 return VM_EXEC;
3526
3527 switch (inst & 0xf0000000) {
3528@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3529 }
3530 #endif
3531
3532+#ifdef CONFIG_PAX_PAGEEXEC
3533+/*
3534+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3535+ *
3536+ * returns 1 when task should be killed
3537+ * 2 when rt_sigreturn trampoline was detected
3538+ * 3 when unpatched PLT trampoline was detected
3539+ */
3540+static int pax_handle_fetch_fault(struct pt_regs *regs)
3541+{
3542+
3543+#ifdef CONFIG_PAX_EMUPLT
3544+ int err;
3545+
3546+ do { /* PaX: unpatched PLT emulation */
3547+ unsigned int bl, depwi;
3548+
3549+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3550+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3551+
3552+ if (err)
3553+ break;
3554+
3555+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3556+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3557+
3558+ err = get_user(ldw, (unsigned int *)addr);
3559+ err |= get_user(bv, (unsigned int *)(addr+4));
3560+ err |= get_user(ldw2, (unsigned int *)(addr+8));
3561+
3562+ if (err)
3563+ break;
3564+
3565+ if (ldw == 0x0E801096U &&
3566+ bv == 0xEAC0C000U &&
3567+ ldw2 == 0x0E881095U)
3568+ {
3569+ unsigned int resolver, map;
3570+
3571+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3572+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3573+ if (err)
3574+ break;
3575+
3576+ regs->gr[20] = instruction_pointer(regs)+8;
3577+ regs->gr[21] = map;
3578+ regs->gr[22] = resolver;
3579+ regs->iaoq[0] = resolver | 3UL;
3580+ regs->iaoq[1] = regs->iaoq[0] + 4;
3581+ return 3;
3582+ }
3583+ }
3584+ } while (0);
3585+#endif
3586+
3587+#ifdef CONFIG_PAX_EMUTRAMP
3588+
3589+#ifndef CONFIG_PAX_EMUSIGRT
3590+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3591+ return 1;
3592+#endif
3593+
3594+ do { /* PaX: rt_sigreturn emulation */
3595+ unsigned int ldi1, ldi2, bel, nop;
3596+
3597+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3598+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3599+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3600+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3601+
3602+ if (err)
3603+ break;
3604+
3605+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3606+ ldi2 == 0x3414015AU &&
3607+ bel == 0xE4008200U &&
3608+ nop == 0x08000240U)
3609+ {
3610+ regs->gr[25] = (ldi1 & 2) >> 1;
3611+ regs->gr[20] = __NR_rt_sigreturn;
3612+ regs->gr[31] = regs->iaoq[1] + 16;
3613+ regs->sr[0] = regs->iasq[1];
3614+ regs->iaoq[0] = 0x100UL;
3615+ regs->iaoq[1] = regs->iaoq[0] + 4;
3616+ regs->iasq[0] = regs->sr[2];
3617+ regs->iasq[1] = regs->sr[2];
3618+ return 2;
3619+ }
3620+ } while (0);
3621+#endif
3622+
3623+ return 1;
3624+}
3625+
3626+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3627+{
3628+ unsigned long i;
3629+
3630+ printk(KERN_ERR "PAX: bytes at PC: ");
3631+ for (i = 0; i < 5; i++) {
3632+ unsigned int c;
3633+ if (get_user(c, (unsigned int *)pc+i))
3634+ printk(KERN_CONT "???????? ");
3635+ else
3636+ printk(KERN_CONT "%08x ", c);
3637+ }
3638+ printk("\n");
3639+}
3640+#endif
3641+
3642 int fixup_exception(struct pt_regs *regs)
3643 {
3644 const struct exception_table_entry *fix;
3645@@ -192,8 +303,33 @@ good_area:
3646
3647 acc_type = parisc_acctyp(code,regs->iir);
3648
3649- if ((vma->vm_flags & acc_type) != acc_type)
3650+ if ((vma->vm_flags & acc_type) != acc_type) {
3651+
3652+#ifdef CONFIG_PAX_PAGEEXEC
3653+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3654+ (address & ~3UL) == instruction_pointer(regs))
3655+ {
3656+ up_read(&mm->mmap_sem);
3657+ switch (pax_handle_fetch_fault(regs)) {
3658+
3659+#ifdef CONFIG_PAX_EMUPLT
3660+ case 3:
3661+ return;
3662+#endif
3663+
3664+#ifdef CONFIG_PAX_EMUTRAMP
3665+ case 2:
3666+ return;
3667+#endif
3668+
3669+ }
3670+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3671+ do_group_exit(SIGKILL);
3672+ }
3673+#endif
3674+
3675 goto bad_area;
3676+ }
3677
3678 /*
3679 * If for any reason at all we couldn't handle the fault, make
3680diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
3681index da29032..f76c24c 100644
3682--- a/arch/powerpc/include/asm/atomic.h
3683+++ b/arch/powerpc/include/asm/atomic.h
3684@@ -522,6 +522,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
3685 return t1;
3686 }
3687
3688+#define atomic64_read_unchecked(v) atomic64_read(v)
3689+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3690+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3691+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3692+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3693+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3694+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3695+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3696+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3697+
3698 #endif /* __powerpc64__ */
3699
3700 #endif /* __KERNEL__ */
3701diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3702index 9e495c9..b6878e5 100644
3703--- a/arch/powerpc/include/asm/cache.h
3704+++ b/arch/powerpc/include/asm/cache.h
3705@@ -3,6 +3,7 @@
3706
3707 #ifdef __KERNEL__
3708
3709+#include <linux/const.h>
3710
3711 /* bytes per L1 cache line */
3712 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3713@@ -22,7 +23,7 @@
3714 #define L1_CACHE_SHIFT 7
3715 #endif
3716
3717-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3718+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3719
3720 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3721
3722diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3723index 3bf9cca..e7457d0 100644
3724--- a/arch/powerpc/include/asm/elf.h
3725+++ b/arch/powerpc/include/asm/elf.h
3726@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3727 the loader. We need to make sure that it is out of the way of the program
3728 that it will "exec", and that there is sufficient room for the brk. */
3729
3730-extern unsigned long randomize_et_dyn(unsigned long base);
3731-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3732+#define ELF_ET_DYN_BASE (0x20000000)
3733+
3734+#ifdef CONFIG_PAX_ASLR
3735+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3736+
3737+#ifdef __powerpc64__
3738+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3739+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
3740+#else
3741+#define PAX_DELTA_MMAP_LEN 15
3742+#define PAX_DELTA_STACK_LEN 15
3743+#endif
3744+#endif
3745
3746 /*
3747 * Our registers are always unsigned longs, whether we're a 32 bit
3748@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3749 (0x7ff >> (PAGE_SHIFT - 12)) : \
3750 (0x3ffff >> (PAGE_SHIFT - 12)))
3751
3752-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3753-#define arch_randomize_brk arch_randomize_brk
3754-
3755 #endif /* __KERNEL__ */
3756
3757 /*
3758diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
3759index 8196e9c..d83a9f3 100644
3760--- a/arch/powerpc/include/asm/exec.h
3761+++ b/arch/powerpc/include/asm/exec.h
3762@@ -4,6 +4,6 @@
3763 #ifndef _ASM_POWERPC_EXEC_H
3764 #define _ASM_POWERPC_EXEC_H
3765
3766-extern unsigned long arch_align_stack(unsigned long sp);
3767+#define arch_align_stack(x) ((x) & ~0xfUL)
3768
3769 #endif /* _ASM_POWERPC_EXEC_H */
3770diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3771index bca8fdc..61e9580 100644
3772--- a/arch/powerpc/include/asm/kmap_types.h
3773+++ b/arch/powerpc/include/asm/kmap_types.h
3774@@ -27,6 +27,7 @@ enum km_type {
3775 KM_PPC_SYNC_PAGE,
3776 KM_PPC_SYNC_ICACHE,
3777 KM_KDB,
3778+ KM_CLEARPAGE,
3779 KM_TYPE_NR
3780 };
3781
3782diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3783index d4a7f64..451de1c 100644
3784--- a/arch/powerpc/include/asm/mman.h
3785+++ b/arch/powerpc/include/asm/mman.h
3786@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
3787 }
3788 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3789
3790-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3791+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3792 {
3793 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3794 }
3795diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3796index f072e97..b436dee 100644
3797--- a/arch/powerpc/include/asm/page.h
3798+++ b/arch/powerpc/include/asm/page.h
3799@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
3800 * and needs to be executable. This means the whole heap ends
3801 * up being executable.
3802 */
3803-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3804- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3805+#define VM_DATA_DEFAULT_FLAGS32 \
3806+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3807+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3808
3809 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3810 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3811@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
3812 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3813 #endif
3814
3815+#define ktla_ktva(addr) (addr)
3816+#define ktva_ktla(addr) (addr)
3817+
3818 /*
3819 * Use the top bit of the higher-level page table entries to indicate whether
3820 * the entries we point to contain hugepages. This works because we know that
3821diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3822index fed85e6..da5c71b 100644
3823--- a/arch/powerpc/include/asm/page_64.h
3824+++ b/arch/powerpc/include/asm/page_64.h
3825@@ -146,15 +146,18 @@ do { \
3826 * stack by default, so in the absence of a PT_GNU_STACK program header
3827 * we turn execute permission off.
3828 */
3829-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3830- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3831+#define VM_STACK_DEFAULT_FLAGS32 \
3832+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3833+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3834
3835 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3836 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3837
3838+#ifndef CONFIG_PAX_PAGEEXEC
3839 #define VM_STACK_DEFAULT_FLAGS \
3840 (is_32bit_task() ? \
3841 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3842+#endif
3843
3844 #include <asm-generic/getorder.h>
3845
3846diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
3847index 292725c..f87ae14 100644
3848--- a/arch/powerpc/include/asm/pgalloc-64.h
3849+++ b/arch/powerpc/include/asm/pgalloc-64.h
3850@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
3851 #ifndef CONFIG_PPC_64K_PAGES
3852
3853 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
3854+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
3855
3856 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
3857 {
3858@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3859 pud_set(pud, (unsigned long)pmd);
3860 }
3861
3862+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3863+{
3864+ pud_populate(mm, pud, pmd);
3865+}
3866+
3867 #define pmd_populate(mm, pmd, pte_page) \
3868 pmd_populate_kernel(mm, pmd, page_address(pte_page))
3869 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
3870@@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3871 #else /* CONFIG_PPC_64K_PAGES */
3872
3873 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
3874+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
3875
3876 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
3877 pte_t *pte)
3878diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3879index 2e0e411..7899c68 100644
3880--- a/arch/powerpc/include/asm/pgtable.h
3881+++ b/arch/powerpc/include/asm/pgtable.h
3882@@ -2,6 +2,7 @@
3883 #define _ASM_POWERPC_PGTABLE_H
3884 #ifdef __KERNEL__
3885
3886+#include <linux/const.h>
3887 #ifndef __ASSEMBLY__
3888 #include <asm/processor.h> /* For TASK_SIZE */
3889 #include <asm/mmu.h>
3890diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3891index 4aad413..85d86bf 100644
3892--- a/arch/powerpc/include/asm/pte-hash32.h
3893+++ b/arch/powerpc/include/asm/pte-hash32.h
3894@@ -21,6 +21,7 @@
3895 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3896 #define _PAGE_USER 0x004 /* usermode access allowed */
3897 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3898+#define _PAGE_EXEC _PAGE_GUARDED
3899 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3900 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3901 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3902diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3903index 9d7f0fb..a28fe69 100644
3904--- a/arch/powerpc/include/asm/reg.h
3905+++ b/arch/powerpc/include/asm/reg.h
3906@@ -212,6 +212,7 @@
3907 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3908 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3909 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3910+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3911 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3912 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3913 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3914diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
3915index 4a741c7..c8162227b 100644
3916--- a/arch/powerpc/include/asm/thread_info.h
3917+++ b/arch/powerpc/include/asm/thread_info.h
3918@@ -104,12 +104,14 @@ static inline struct thread_info *current_thread_info(void)
3919 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
3920 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
3921 #define TIF_SINGLESTEP 8 /* singlestepping active */
3922-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
3923 #define TIF_SECCOMP 10 /* secure computing */
3924 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
3925 #define TIF_NOERROR 12 /* Force successful syscall return */
3926 #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
3927 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
3928+#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
3929+/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
3930+#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
3931
3932 /* as above, but as bit values */
3933 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
3934@@ -127,8 +129,11 @@ static inline struct thread_info *current_thread_info(void)
3935 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
3936 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
3937 #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
3938+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
3939+
3940 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
3941- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
3942+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT \
3943+ _TIF_GRSEC_SETXID)
3944
3945 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
3946 _TIF_NOTIFY_RESUME)
3947diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3948index bd0fb84..a42a14b 100644
3949--- a/arch/powerpc/include/asm/uaccess.h
3950+++ b/arch/powerpc/include/asm/uaccess.h
3951@@ -13,6 +13,8 @@
3952 #define VERIFY_READ 0
3953 #define VERIFY_WRITE 1
3954
3955+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3956+
3957 /*
3958 * The fs value determines whether argument validity checking should be
3959 * performed or not. If get_fs() == USER_DS, checking is performed, with
3960@@ -327,52 +329,6 @@ do { \
3961 extern unsigned long __copy_tofrom_user(void __user *to,
3962 const void __user *from, unsigned long size);
3963
3964-#ifndef __powerpc64__
3965-
3966-static inline unsigned long copy_from_user(void *to,
3967- const void __user *from, unsigned long n)
3968-{
3969- unsigned long over;
3970-
3971- if (access_ok(VERIFY_READ, from, n))
3972- return __copy_tofrom_user((__force void __user *)to, from, n);
3973- if ((unsigned long)from < TASK_SIZE) {
3974- over = (unsigned long)from + n - TASK_SIZE;
3975- return __copy_tofrom_user((__force void __user *)to, from,
3976- n - over) + over;
3977- }
3978- return n;
3979-}
3980-
3981-static inline unsigned long copy_to_user(void __user *to,
3982- const void *from, unsigned long n)
3983-{
3984- unsigned long over;
3985-
3986- if (access_ok(VERIFY_WRITE, to, n))
3987- return __copy_tofrom_user(to, (__force void __user *)from, n);
3988- if ((unsigned long)to < TASK_SIZE) {
3989- over = (unsigned long)to + n - TASK_SIZE;
3990- return __copy_tofrom_user(to, (__force void __user *)from,
3991- n - over) + over;
3992- }
3993- return n;
3994-}
3995-
3996-#else /* __powerpc64__ */
3997-
3998-#define __copy_in_user(to, from, size) \
3999- __copy_tofrom_user((to), (from), (size))
4000-
4001-extern unsigned long copy_from_user(void *to, const void __user *from,
4002- unsigned long n);
4003-extern unsigned long copy_to_user(void __user *to, const void *from,
4004- unsigned long n);
4005-extern unsigned long copy_in_user(void __user *to, const void __user *from,
4006- unsigned long n);
4007-
4008-#endif /* __powerpc64__ */
4009-
4010 static inline unsigned long __copy_from_user_inatomic(void *to,
4011 const void __user *from, unsigned long n)
4012 {
4013@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
4014 if (ret == 0)
4015 return 0;
4016 }
4017+
4018+ if (!__builtin_constant_p(n))
4019+ check_object_size(to, n, false);
4020+
4021 return __copy_tofrom_user((__force void __user *)to, from, n);
4022 }
4023
4024@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
4025 if (ret == 0)
4026 return 0;
4027 }
4028+
4029+ if (!__builtin_constant_p(n))
4030+ check_object_size(from, n, true);
4031+
4032 return __copy_tofrom_user(to, (__force const void __user *)from, n);
4033 }
4034
4035@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
4036 return __copy_to_user_inatomic(to, from, size);
4037 }
4038
4039+#ifndef __powerpc64__
4040+
4041+static inline unsigned long __must_check copy_from_user(void *to,
4042+ const void __user *from, unsigned long n)
4043+{
4044+ unsigned long over;
4045+
4046+ if ((long)n < 0)
4047+ return n;
4048+
4049+ if (access_ok(VERIFY_READ, from, n)) {
4050+ if (!__builtin_constant_p(n))
4051+ check_object_size(to, n, false);
4052+ return __copy_tofrom_user((__force void __user *)to, from, n);
4053+ }
4054+ if ((unsigned long)from < TASK_SIZE) {
4055+ over = (unsigned long)from + n - TASK_SIZE;
4056+ if (!__builtin_constant_p(n - over))
4057+ check_object_size(to, n - over, false);
4058+ return __copy_tofrom_user((__force void __user *)to, from,
4059+ n - over) + over;
4060+ }
4061+ return n;
4062+}
4063+
4064+static inline unsigned long __must_check copy_to_user(void __user *to,
4065+ const void *from, unsigned long n)
4066+{
4067+ unsigned long over;
4068+
4069+ if ((long)n < 0)
4070+ return n;
4071+
4072+ if (access_ok(VERIFY_WRITE, to, n)) {
4073+ if (!__builtin_constant_p(n))
4074+ check_object_size(from, n, true);
4075+ return __copy_tofrom_user(to, (__force void __user *)from, n);
4076+ }
4077+ if ((unsigned long)to < TASK_SIZE) {
4078+ over = (unsigned long)to + n - TASK_SIZE;
4079+ if (!__builtin_constant_p(n))
4080+ check_object_size(from, n - over, true);
4081+ return __copy_tofrom_user(to, (__force void __user *)from,
4082+ n - over) + over;
4083+ }
4084+ return n;
4085+}
4086+
4087+#else /* __powerpc64__ */
4088+
4089+#define __copy_in_user(to, from, size) \
4090+ __copy_tofrom_user((to), (from), (size))
4091+
4092+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
4093+{
4094+ if ((long)n < 0 || n > INT_MAX)
4095+ return n;
4096+
4097+ if (!__builtin_constant_p(n))
4098+ check_object_size(to, n, false);
4099+
4100+ if (likely(access_ok(VERIFY_READ, from, n)))
4101+ n = __copy_from_user(to, from, n);
4102+ else
4103+ memset(to, 0, n);
4104+ return n;
4105+}
4106+
4107+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
4108+{
4109+ if ((long)n < 0 || n > INT_MAX)
4110+ return n;
4111+
4112+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
4113+ if (!__builtin_constant_p(n))
4114+ check_object_size(from, n, true);
4115+ n = __copy_to_user(to, from, n);
4116+ }
4117+ return n;
4118+}
4119+
4120+extern unsigned long copy_in_user(void __user *to, const void __user *from,
4121+ unsigned long n);
4122+
4123+#endif /* __powerpc64__ */
4124+
4125 extern unsigned long __clear_user(void __user *addr, unsigned long size);
4126
4127 static inline unsigned long clear_user(void __user *addr, unsigned long size)
4128diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
4129index 7215cc2..a9730c1 100644
4130--- a/arch/powerpc/kernel/exceptions-64e.S
4131+++ b/arch/powerpc/kernel/exceptions-64e.S
4132@@ -661,6 +661,7 @@ storage_fault_common:
4133 std r14,_DAR(r1)
4134 std r15,_DSISR(r1)
4135 addi r3,r1,STACK_FRAME_OVERHEAD
4136+ bl .save_nvgprs
4137 mr r4,r14
4138 mr r5,r15
4139 ld r14,PACA_EXGEN+EX_R14(r13)
4140@@ -669,8 +670,7 @@ storage_fault_common:
4141 cmpdi r3,0
4142 bne- 1f
4143 b .ret_from_except_lite
4144-1: bl .save_nvgprs
4145- mr r5,r3
4146+1: mr r5,r3
4147 addi r3,r1,STACK_FRAME_OVERHEAD
4148 ld r4,_DAR(r1)
4149 bl .bad_page_fault
4150diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
4151index 8f880bc..c5bd2f3 100644
4152--- a/arch/powerpc/kernel/exceptions-64s.S
4153+++ b/arch/powerpc/kernel/exceptions-64s.S
4154@@ -890,10 +890,10 @@ handle_page_fault:
4155 11: ld r4,_DAR(r1)
4156 ld r5,_DSISR(r1)
4157 addi r3,r1,STACK_FRAME_OVERHEAD
4158+ bl .save_nvgprs
4159 bl .do_page_fault
4160 cmpdi r3,0
4161 beq+ 12f
4162- bl .save_nvgprs
4163 mr r5,r3
4164 addi r3,r1,STACK_FRAME_OVERHEAD
4165 lwz r4,_DAR(r1)
4166diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
4167index 2e3200c..72095ce 100644
4168--- a/arch/powerpc/kernel/module_32.c
4169+++ b/arch/powerpc/kernel/module_32.c
4170@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
4171 me->arch.core_plt_section = i;
4172 }
4173 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
4174- printk("Module doesn't contain .plt or .init.plt sections.\n");
4175+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
4176 return -ENOEXEC;
4177 }
4178
4179@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
4180
4181 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
4182 /* Init, or core PLT? */
4183- if (location >= mod->module_core
4184- && location < mod->module_core + mod->core_size)
4185+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
4186+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
4187 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
4188- else
4189+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
4190+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
4191 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
4192+ else {
4193+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
4194+ return ~0UL;
4195+ }
4196
4197 /* Find this entry, or if that fails, the next avail. entry */
4198 while (entry->jump[0]) {
4199diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
4200index 4937c96..70714b7 100644
4201--- a/arch/powerpc/kernel/process.c
4202+++ b/arch/powerpc/kernel/process.c
4203@@ -681,8 +681,8 @@ void show_regs(struct pt_regs * regs)
4204 * Lookup NIP late so we have the best change of getting the
4205 * above info out without failing
4206 */
4207- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
4208- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
4209+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
4210+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
4211 #endif
4212 show_stack(current, (unsigned long *) regs->gpr[1]);
4213 if (!user_mode(regs))
4214@@ -1186,10 +1186,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4215 newsp = stack[0];
4216 ip = stack[STACK_FRAME_LR_SAVE];
4217 if (!firstframe || ip != lr) {
4218- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
4219+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
4220 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4221 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
4222- printk(" (%pS)",
4223+ printk(" (%pA)",
4224 (void *)current->ret_stack[curr_frame].ret);
4225 curr_frame--;
4226 }
4227@@ -1209,7 +1209,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4228 struct pt_regs *regs = (struct pt_regs *)
4229 (sp + STACK_FRAME_OVERHEAD);
4230 lr = regs->link;
4231- printk("--- Exception: %lx at %pS\n LR = %pS\n",
4232+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
4233 regs->trap, (void *)regs->nip, (void *)lr);
4234 firstframe = 1;
4235 }
4236@@ -1282,58 +1282,3 @@ void thread_info_cache_init(void)
4237 }
4238
4239 #endif /* THREAD_SHIFT < PAGE_SHIFT */
4240-
4241-unsigned long arch_align_stack(unsigned long sp)
4242-{
4243- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4244- sp -= get_random_int() & ~PAGE_MASK;
4245- return sp & ~0xf;
4246-}
4247-
4248-static inline unsigned long brk_rnd(void)
4249-{
4250- unsigned long rnd = 0;
4251-
4252- /* 8MB for 32bit, 1GB for 64bit */
4253- if (is_32bit_task())
4254- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
4255- else
4256- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
4257-
4258- return rnd << PAGE_SHIFT;
4259-}
4260-
4261-unsigned long arch_randomize_brk(struct mm_struct *mm)
4262-{
4263- unsigned long base = mm->brk;
4264- unsigned long ret;
4265-
4266-#ifdef CONFIG_PPC_STD_MMU_64
4267- /*
4268- * If we are using 1TB segments and we are allowed to randomise
4269- * the heap, we can put it above 1TB so it is backed by a 1TB
4270- * segment. Otherwise the heap will be in the bottom 1TB
4271- * which always uses 256MB segments and this may result in a
4272- * performance penalty.
4273- */
4274- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
4275- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
4276-#endif
4277-
4278- ret = PAGE_ALIGN(base + brk_rnd());
4279-
4280- if (ret < mm->brk)
4281- return mm->brk;
4282-
4283- return ret;
4284-}
4285-
4286-unsigned long randomize_et_dyn(unsigned long base)
4287-{
4288- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4289-
4290- if (ret < base)
4291- return base;
4292-
4293- return ret;
4294-}
4295diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
4296index 8d8e028..c2aeb50 100644
4297--- a/arch/powerpc/kernel/ptrace.c
4298+++ b/arch/powerpc/kernel/ptrace.c
4299@@ -1702,6 +1702,10 @@ long arch_ptrace(struct task_struct *child, long request,
4300 return ret;
4301 }
4302
4303+#ifdef CONFIG_GRKERNSEC_SETXID
4304+extern void gr_delayed_cred_worker(void);
4305+#endif
4306+
4307 /*
4308 * We must return the syscall number to actually look up in the table.
4309 * This can be -1L to skip running any syscall at all.
4310@@ -1712,6 +1716,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
4311
4312 secure_computing(regs->gpr[0]);
4313
4314+#ifdef CONFIG_GRKERNSEC_SETXID
4315+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4316+ gr_delayed_cred_worker();
4317+#endif
4318+
4319 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
4320 tracehook_report_syscall_entry(regs))
4321 /*
4322@@ -1746,6 +1755,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
4323 {
4324 int step;
4325
4326+#ifdef CONFIG_GRKERNSEC_SETXID
4327+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4328+ gr_delayed_cred_worker();
4329+#endif
4330+
4331 audit_syscall_exit(regs);
4332
4333 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
4334diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
4335index 45eb998..0cb36bc 100644
4336--- a/arch/powerpc/kernel/signal_32.c
4337+++ b/arch/powerpc/kernel/signal_32.c
4338@@ -861,7 +861,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
4339 /* Save user registers on the stack */
4340 frame = &rt_sf->uc.uc_mcontext;
4341 addr = frame;
4342- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
4343+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4344 if (save_user_regs(regs, frame, 0, 1))
4345 goto badframe;
4346 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
4347diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
4348index 2692efd..6673d2e 100644
4349--- a/arch/powerpc/kernel/signal_64.c
4350+++ b/arch/powerpc/kernel/signal_64.c
4351@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
4352 current->thread.fpscr.val = 0;
4353
4354 /* Set up to return from userspace. */
4355- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
4356+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4357 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
4358 } else {
4359 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
4360diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
4361index 1589723..cefe690 100644
4362--- a/arch/powerpc/kernel/traps.c
4363+++ b/arch/powerpc/kernel/traps.c
4364@@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
4365 return flags;
4366 }
4367
4368+extern void gr_handle_kernel_exploit(void);
4369+
4370 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4371 int signr)
4372 {
4373@@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4374 panic("Fatal exception in interrupt");
4375 if (panic_on_oops)
4376 panic("Fatal exception");
4377+
4378+ gr_handle_kernel_exploit();
4379+
4380 do_exit(signr);
4381 }
4382
4383diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
4384index 9eb5b9b..e45498a 100644
4385--- a/arch/powerpc/kernel/vdso.c
4386+++ b/arch/powerpc/kernel/vdso.c
4387@@ -34,6 +34,7 @@
4388 #include <asm/firmware.h>
4389 #include <asm/vdso.h>
4390 #include <asm/vdso_datapage.h>
4391+#include <asm/mman.h>
4392
4393 #include "setup.h"
4394
4395@@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4396 vdso_base = VDSO32_MBASE;
4397 #endif
4398
4399- current->mm->context.vdso_base = 0;
4400+ current->mm->context.vdso_base = ~0UL;
4401
4402 /* vDSO has a problem and was disabled, just don't "enable" it for the
4403 * process
4404@@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4405 vdso_base = get_unmapped_area(NULL, vdso_base,
4406 (vdso_pages << PAGE_SHIFT) +
4407 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
4408- 0, 0);
4409+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
4410 if (IS_ERR_VALUE(vdso_base)) {
4411 rc = vdso_base;
4412 goto fail_mmapsem;
4413diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4414index 5eea6f3..5d10396 100644
4415--- a/arch/powerpc/lib/usercopy_64.c
4416+++ b/arch/powerpc/lib/usercopy_64.c
4417@@ -9,22 +9,6 @@
4418 #include <linux/module.h>
4419 #include <asm/uaccess.h>
4420
4421-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4422-{
4423- if (likely(access_ok(VERIFY_READ, from, n)))
4424- n = __copy_from_user(to, from, n);
4425- else
4426- memset(to, 0, n);
4427- return n;
4428-}
4429-
4430-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4431-{
4432- if (likely(access_ok(VERIFY_WRITE, to, n)))
4433- n = __copy_to_user(to, from, n);
4434- return n;
4435-}
4436-
4437 unsigned long copy_in_user(void __user *to, const void __user *from,
4438 unsigned long n)
4439 {
4440@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4441 return n;
4442 }
4443
4444-EXPORT_SYMBOL(copy_from_user);
4445-EXPORT_SYMBOL(copy_to_user);
4446 EXPORT_SYMBOL(copy_in_user);
4447
4448diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4449index 08ffcf5..a0ab912 100644
4450--- a/arch/powerpc/mm/fault.c
4451+++ b/arch/powerpc/mm/fault.c
4452@@ -32,6 +32,10 @@
4453 #include <linux/perf_event.h>
4454 #include <linux/magic.h>
4455 #include <linux/ratelimit.h>
4456+#include <linux/slab.h>
4457+#include <linux/pagemap.h>
4458+#include <linux/compiler.h>
4459+#include <linux/unistd.h>
4460
4461 #include <asm/firmware.h>
4462 #include <asm/page.h>
4463@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4464 }
4465 #endif
4466
4467+#ifdef CONFIG_PAX_PAGEEXEC
4468+/*
4469+ * PaX: decide what to do with offenders (regs->nip = fault address)
4470+ *
4471+ * returns 1 when task should be killed
4472+ */
4473+static int pax_handle_fetch_fault(struct pt_regs *regs)
4474+{
4475+ return 1;
4476+}
4477+
4478+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4479+{
4480+ unsigned long i;
4481+
4482+ printk(KERN_ERR "PAX: bytes at PC: ");
4483+ for (i = 0; i < 5; i++) {
4484+ unsigned int c;
4485+ if (get_user(c, (unsigned int __user *)pc+i))
4486+ printk(KERN_CONT "???????? ");
4487+ else
4488+ printk(KERN_CONT "%08x ", c);
4489+ }
4490+ printk("\n");
4491+}
4492+#endif
4493+
4494 /*
4495 * Check whether the instruction at regs->nip is a store using
4496 * an update addressing form which will update r1.
4497@@ -215,7 +246,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4498 * indicate errors in DSISR but can validly be set in SRR1.
4499 */
4500 if (trap == 0x400)
4501- error_code &= 0x48200000;
4502+ error_code &= 0x58200000;
4503 else
4504 is_write = error_code & DSISR_ISSTORE;
4505 #else
4506@@ -366,7 +397,7 @@ good_area:
4507 * "undefined". Of those that can be set, this is the only
4508 * one which seems bad.
4509 */
4510- if (error_code & 0x10000000)
4511+ if (error_code & DSISR_GUARDED)
4512 /* Guarded storage error. */
4513 goto bad_area;
4514 #endif /* CONFIG_8xx */
4515@@ -381,7 +412,7 @@ good_area:
4516 * processors use the same I/D cache coherency mechanism
4517 * as embedded.
4518 */
4519- if (error_code & DSISR_PROTFAULT)
4520+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4521 goto bad_area;
4522 #endif /* CONFIG_PPC_STD_MMU */
4523
4524@@ -463,6 +494,23 @@ bad_area:
4525 bad_area_nosemaphore:
4526 /* User mode accesses cause a SIGSEGV */
4527 if (user_mode(regs)) {
4528+
4529+#ifdef CONFIG_PAX_PAGEEXEC
4530+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4531+#ifdef CONFIG_PPC_STD_MMU
4532+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4533+#else
4534+ if (is_exec && regs->nip == address) {
4535+#endif
4536+ switch (pax_handle_fetch_fault(regs)) {
4537+ }
4538+
4539+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4540+ do_group_exit(SIGKILL);
4541+ }
4542+ }
4543+#endif
4544+
4545 _exception(SIGSEGV, regs, code, address);
4546 return 0;
4547 }
4548diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4549index 67a42ed..1c7210c 100644
4550--- a/arch/powerpc/mm/mmap_64.c
4551+++ b/arch/powerpc/mm/mmap_64.c
4552@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4553 */
4554 if (mmap_is_legacy()) {
4555 mm->mmap_base = TASK_UNMAPPED_BASE;
4556+
4557+#ifdef CONFIG_PAX_RANDMMAP
4558+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4559+ mm->mmap_base += mm->delta_mmap;
4560+#endif
4561+
4562 mm->get_unmapped_area = arch_get_unmapped_area;
4563 mm->unmap_area = arch_unmap_area;
4564 } else {
4565 mm->mmap_base = mmap_base();
4566+
4567+#ifdef CONFIG_PAX_RANDMMAP
4568+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4569+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4570+#endif
4571+
4572 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4573 mm->unmap_area = arch_unmap_area_topdown;
4574 }
4575diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4576index 73709f7..6b90313 100644
4577--- a/arch/powerpc/mm/slice.c
4578+++ b/arch/powerpc/mm/slice.c
4579@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4580 if ((mm->task_size - len) < addr)
4581 return 0;
4582 vma = find_vma(mm, addr);
4583- return (!vma || (addr + len) <= vma->vm_start);
4584+ return check_heap_stack_gap(vma, addr, len);
4585 }
4586
4587 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4588@@ -256,7 +256,7 @@ full_search:
4589 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4590 continue;
4591 }
4592- if (!vma || addr + len <= vma->vm_start) {
4593+ if (check_heap_stack_gap(vma, addr, len)) {
4594 /*
4595 * Remember the place where we stopped the search:
4596 */
4597@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4598 }
4599 }
4600
4601- addr = mm->mmap_base;
4602- while (addr > len) {
4603+ if (mm->mmap_base < len)
4604+ addr = -ENOMEM;
4605+ else
4606+ addr = mm->mmap_base - len;
4607+
4608+ while (!IS_ERR_VALUE(addr)) {
4609 /* Go down by chunk size */
4610- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4611+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
4612
4613 /* Check for hit with different page size */
4614 mask = slice_range_to_mask(addr, len);
4615@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4616 * return with success:
4617 */
4618 vma = find_vma(mm, addr);
4619- if (!vma || (addr + len) <= vma->vm_start) {
4620+ if (check_heap_stack_gap(vma, addr, len)) {
4621 /* remember the address as a hint for next time */
4622 if (use_cache)
4623 mm->free_area_cache = addr;
4624@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4625 mm->cached_hole_size = vma->vm_start - addr;
4626
4627 /* try just below the current vma->vm_start */
4628- addr = vma->vm_start;
4629+ addr = skip_heap_stack_gap(vma, len);
4630 }
4631
4632 /*
4633@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4634 if (fixed && addr > (mm->task_size - len))
4635 return -EINVAL;
4636
4637+#ifdef CONFIG_PAX_RANDMMAP
4638+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4639+ addr = 0;
4640+#endif
4641+
4642 /* If hint, make sure it matches our alignment restrictions */
4643 if (!fixed && addr) {
4644 addr = _ALIGN_UP(addr, 1ul << pshift);
4645diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4646index 748347b..81bc6c7 100644
4647--- a/arch/s390/include/asm/atomic.h
4648+++ b/arch/s390/include/asm/atomic.h
4649@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4650 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4651 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4652
4653+#define atomic64_read_unchecked(v) atomic64_read(v)
4654+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4655+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4656+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4657+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4658+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4659+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4660+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4661+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4662+
4663 #define smp_mb__before_atomic_dec() smp_mb()
4664 #define smp_mb__after_atomic_dec() smp_mb()
4665 #define smp_mb__before_atomic_inc() smp_mb()
4666diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4667index 2a30d5a..5e5586f 100644
4668--- a/arch/s390/include/asm/cache.h
4669+++ b/arch/s390/include/asm/cache.h
4670@@ -11,8 +11,10 @@
4671 #ifndef __ARCH_S390_CACHE_H
4672 #define __ARCH_S390_CACHE_H
4673
4674-#define L1_CACHE_BYTES 256
4675+#include <linux/const.h>
4676+
4677 #define L1_CACHE_SHIFT 8
4678+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4679 #define NET_SKB_PAD 32
4680
4681 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4682diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4683index c4ee39f..352881b 100644
4684--- a/arch/s390/include/asm/elf.h
4685+++ b/arch/s390/include/asm/elf.h
4686@@ -161,8 +161,14 @@ extern unsigned int vdso_enabled;
4687 the loader. We need to make sure that it is out of the way of the program
4688 that it will "exec", and that there is sufficient room for the brk. */
4689
4690-extern unsigned long randomize_et_dyn(unsigned long base);
4691-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4692+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4693+
4694+#ifdef CONFIG_PAX_ASLR
4695+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4696+
4697+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4698+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4699+#endif
4700
4701 /* This yields a mask that user programs can use to figure out what
4702 instruction set this CPU supports. */
4703@@ -210,7 +216,4 @@ struct linux_binprm;
4704 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4705 int arch_setup_additional_pages(struct linux_binprm *, int);
4706
4707-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4708-#define arch_randomize_brk arch_randomize_brk
4709-
4710 #endif
4711diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
4712index c4a93d6..4d2a9b4 100644
4713--- a/arch/s390/include/asm/exec.h
4714+++ b/arch/s390/include/asm/exec.h
4715@@ -7,6 +7,6 @@
4716 #ifndef __ASM_EXEC_H
4717 #define __ASM_EXEC_H
4718
4719-extern unsigned long arch_align_stack(unsigned long sp);
4720+#define arch_align_stack(x) ((x) & ~0xfUL)
4721
4722 #endif /* __ASM_EXEC_H */
4723diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4724index 8f2cada..43072c1 100644
4725--- a/arch/s390/include/asm/uaccess.h
4726+++ b/arch/s390/include/asm/uaccess.h
4727@@ -236,6 +236,10 @@ static inline unsigned long __must_check
4728 copy_to_user(void __user *to, const void *from, unsigned long n)
4729 {
4730 might_fault();
4731+
4732+ if ((long)n < 0)
4733+ return n;
4734+
4735 if (access_ok(VERIFY_WRITE, to, n))
4736 n = __copy_to_user(to, from, n);
4737 return n;
4738@@ -261,6 +265,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4739 static inline unsigned long __must_check
4740 __copy_from_user(void *to, const void __user *from, unsigned long n)
4741 {
4742+ if ((long)n < 0)
4743+ return n;
4744+
4745 if (__builtin_constant_p(n) && (n <= 256))
4746 return uaccess.copy_from_user_small(n, from, to);
4747 else
4748@@ -292,10 +299,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
4749 static inline unsigned long __must_check
4750 copy_from_user(void *to, const void __user *from, unsigned long n)
4751 {
4752- unsigned int sz = __compiletime_object_size(to);
4753+ size_t sz = __compiletime_object_size(to);
4754
4755 might_fault();
4756- if (unlikely(sz != -1 && sz < n)) {
4757+
4758+ if ((long)n < 0)
4759+ return n;
4760+
4761+ if (unlikely(sz != (size_t)-1 && sz < n)) {
4762 copy_from_user_overflow();
4763 return n;
4764 }
4765diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4766index dfcb343..eda788a 100644
4767--- a/arch/s390/kernel/module.c
4768+++ b/arch/s390/kernel/module.c
4769@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4770
4771 /* Increase core size by size of got & plt and set start
4772 offsets for got and plt. */
4773- me->core_size = ALIGN(me->core_size, 4);
4774- me->arch.got_offset = me->core_size;
4775- me->core_size += me->arch.got_size;
4776- me->arch.plt_offset = me->core_size;
4777- me->core_size += me->arch.plt_size;
4778+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
4779+ me->arch.got_offset = me->core_size_rw;
4780+ me->core_size_rw += me->arch.got_size;
4781+ me->arch.plt_offset = me->core_size_rx;
4782+ me->core_size_rx += me->arch.plt_size;
4783 return 0;
4784 }
4785
4786@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4787 if (info->got_initialized == 0) {
4788 Elf_Addr *gotent;
4789
4790- gotent = me->module_core + me->arch.got_offset +
4791+ gotent = me->module_core_rw + me->arch.got_offset +
4792 info->got_offset;
4793 *gotent = val;
4794 info->got_initialized = 1;
4795@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4796 else if (r_type == R_390_GOTENT ||
4797 r_type == R_390_GOTPLTENT)
4798 *(unsigned int *) loc =
4799- (val + (Elf_Addr) me->module_core - loc) >> 1;
4800+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4801 else if (r_type == R_390_GOT64 ||
4802 r_type == R_390_GOTPLT64)
4803 *(unsigned long *) loc = val;
4804@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4805 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4806 if (info->plt_initialized == 0) {
4807 unsigned int *ip;
4808- ip = me->module_core + me->arch.plt_offset +
4809+ ip = me->module_core_rx + me->arch.plt_offset +
4810 info->plt_offset;
4811 #ifndef CONFIG_64BIT
4812 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4813@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4814 val - loc + 0xffffUL < 0x1ffffeUL) ||
4815 (r_type == R_390_PLT32DBL &&
4816 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4817- val = (Elf_Addr) me->module_core +
4818+ val = (Elf_Addr) me->module_core_rx +
4819 me->arch.plt_offset +
4820 info->plt_offset;
4821 val += rela->r_addend - loc;
4822@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4823 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4824 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4825 val = val + rela->r_addend -
4826- ((Elf_Addr) me->module_core + me->arch.got_offset);
4827+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4828 if (r_type == R_390_GOTOFF16)
4829 *(unsigned short *) loc = val;
4830 else if (r_type == R_390_GOTOFF32)
4831@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4832 break;
4833 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4834 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4835- val = (Elf_Addr) me->module_core + me->arch.got_offset +
4836+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4837 rela->r_addend - loc;
4838 if (r_type == R_390_GOTPC)
4839 *(unsigned int *) loc = val;
4840diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
4841index 60055ce..ee4b252 100644
4842--- a/arch/s390/kernel/process.c
4843+++ b/arch/s390/kernel/process.c
4844@@ -316,39 +316,3 @@ unsigned long get_wchan(struct task_struct *p)
4845 }
4846 return 0;
4847 }
4848-
4849-unsigned long arch_align_stack(unsigned long sp)
4850-{
4851- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4852- sp -= get_random_int() & ~PAGE_MASK;
4853- return sp & ~0xf;
4854-}
4855-
4856-static inline unsigned long brk_rnd(void)
4857-{
4858- /* 8MB for 32bit, 1GB for 64bit */
4859- if (is_32bit_task())
4860- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4861- else
4862- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4863-}
4864-
4865-unsigned long arch_randomize_brk(struct mm_struct *mm)
4866-{
4867- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4868-
4869- if (ret < mm->brk)
4870- return mm->brk;
4871- return ret;
4872-}
4873-
4874-unsigned long randomize_et_dyn(unsigned long base)
4875-{
4876- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4877-
4878- if (!(current->flags & PF_RANDOMIZE))
4879- return base;
4880- if (ret < base)
4881- return base;
4882- return ret;
4883-}
4884diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4885index 2857c48..d047481 100644
4886--- a/arch/s390/mm/mmap.c
4887+++ b/arch/s390/mm/mmap.c
4888@@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4889 */
4890 if (mmap_is_legacy()) {
4891 mm->mmap_base = TASK_UNMAPPED_BASE;
4892+
4893+#ifdef CONFIG_PAX_RANDMMAP
4894+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4895+ mm->mmap_base += mm->delta_mmap;
4896+#endif
4897+
4898 mm->get_unmapped_area = arch_get_unmapped_area;
4899 mm->unmap_area = arch_unmap_area;
4900 } else {
4901 mm->mmap_base = mmap_base();
4902+
4903+#ifdef CONFIG_PAX_RANDMMAP
4904+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4905+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4906+#endif
4907+
4908 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4909 mm->unmap_area = arch_unmap_area_topdown;
4910 }
4911@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4912 */
4913 if (mmap_is_legacy()) {
4914 mm->mmap_base = TASK_UNMAPPED_BASE;
4915+
4916+#ifdef CONFIG_PAX_RANDMMAP
4917+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4918+ mm->mmap_base += mm->delta_mmap;
4919+#endif
4920+
4921 mm->get_unmapped_area = s390_get_unmapped_area;
4922 mm->unmap_area = arch_unmap_area;
4923 } else {
4924 mm->mmap_base = mmap_base();
4925+
4926+#ifdef CONFIG_PAX_RANDMMAP
4927+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4928+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4929+#endif
4930+
4931 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4932 mm->unmap_area = arch_unmap_area_topdown;
4933 }
4934diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4935index ae3d59f..f65f075 100644
4936--- a/arch/score/include/asm/cache.h
4937+++ b/arch/score/include/asm/cache.h
4938@@ -1,7 +1,9 @@
4939 #ifndef _ASM_SCORE_CACHE_H
4940 #define _ASM_SCORE_CACHE_H
4941
4942+#include <linux/const.h>
4943+
4944 #define L1_CACHE_SHIFT 4
4945-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4946+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4947
4948 #endif /* _ASM_SCORE_CACHE_H */
4949diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
4950index f9f3cd5..58ff438 100644
4951--- a/arch/score/include/asm/exec.h
4952+++ b/arch/score/include/asm/exec.h
4953@@ -1,6 +1,6 @@
4954 #ifndef _ASM_SCORE_EXEC_H
4955 #define _ASM_SCORE_EXEC_H
4956
4957-extern unsigned long arch_align_stack(unsigned long sp);
4958+#define arch_align_stack(x) (x)
4959
4960 #endif /* _ASM_SCORE_EXEC_H */
4961diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4962index 2707023..1c2a3b7 100644
4963--- a/arch/score/kernel/process.c
4964+++ b/arch/score/kernel/process.c
4965@@ -159,8 +159,3 @@ unsigned long get_wchan(struct task_struct *task)
4966
4967 return task_pt_regs(task)->cp0_epc;
4968 }
4969-
4970-unsigned long arch_align_stack(unsigned long sp)
4971-{
4972- return sp;
4973-}
4974diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
4975index ef9e555..331bd29 100644
4976--- a/arch/sh/include/asm/cache.h
4977+++ b/arch/sh/include/asm/cache.h
4978@@ -9,10 +9,11 @@
4979 #define __ASM_SH_CACHE_H
4980 #ifdef __KERNEL__
4981
4982+#include <linux/const.h>
4983 #include <linux/init.h>
4984 #include <cpu/cache.h>
4985
4986-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4987+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4988
4989 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4990
4991diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4992index afeb710..d1d1289 100644
4993--- a/arch/sh/mm/mmap.c
4994+++ b/arch/sh/mm/mmap.c
4995@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4996 addr = PAGE_ALIGN(addr);
4997
4998 vma = find_vma(mm, addr);
4999- if (TASK_SIZE - len >= addr &&
5000- (!vma || addr + len <= vma->vm_start))
5001+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5002 return addr;
5003 }
5004
5005@@ -106,7 +105,7 @@ full_search:
5006 }
5007 return -ENOMEM;
5008 }
5009- if (likely(!vma || addr + len <= vma->vm_start)) {
5010+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5011 /*
5012 * Remember the place where we stopped the search:
5013 */
5014@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5015 addr = PAGE_ALIGN(addr);
5016
5017 vma = find_vma(mm, addr);
5018- if (TASK_SIZE - len >= addr &&
5019- (!vma || addr + len <= vma->vm_start))
5020+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5021 return addr;
5022 }
5023
5024@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5025 /* make sure it can fit in the remaining address space */
5026 if (likely(addr > len)) {
5027 vma = find_vma(mm, addr-len);
5028- if (!vma || addr <= vma->vm_start) {
5029+ if (check_heap_stack_gap(vma, addr - len, len)) {
5030 /* remember the address as a hint for next time */
5031 return (mm->free_area_cache = addr-len);
5032 }
5033@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5034 if (unlikely(mm->mmap_base < len))
5035 goto bottomup;
5036
5037- addr = mm->mmap_base-len;
5038- if (do_colour_align)
5039- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5040+ addr = mm->mmap_base - len;
5041
5042 do {
5043+ if (do_colour_align)
5044+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5045 /*
5046 * Lookup failure means no vma is above this address,
5047 * else if new region fits below vma->vm_start,
5048 * return with success:
5049 */
5050 vma = find_vma(mm, addr);
5051- if (likely(!vma || addr+len <= vma->vm_start)) {
5052+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5053 /* remember the address as a hint for next time */
5054 return (mm->free_area_cache = addr);
5055 }
5056@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5057 mm->cached_hole_size = vma->vm_start - addr;
5058
5059 /* try just below the current vma->vm_start */
5060- addr = vma->vm_start-len;
5061- if (do_colour_align)
5062- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5063- } while (likely(len < vma->vm_start));
5064+ addr = skip_heap_stack_gap(vma, len);
5065+ } while (!IS_ERR_VALUE(addr));
5066
5067 bottomup:
5068 /*
5069diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
5070index eddcfb3..b117d90 100644
5071--- a/arch/sparc/Makefile
5072+++ b/arch/sparc/Makefile
5073@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
5074 # Export what is needed by arch/sparc/boot/Makefile
5075 export VMLINUX_INIT VMLINUX_MAIN
5076 VMLINUX_INIT := $(head-y) $(init-y)
5077-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5078+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5079 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5080 VMLINUX_MAIN += $(drivers-y) $(net-y)
5081
5082diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
5083index ce35a1c..2e7b8f9 100644
5084--- a/arch/sparc/include/asm/atomic_64.h
5085+++ b/arch/sparc/include/asm/atomic_64.h
5086@@ -14,18 +14,40 @@
5087 #define ATOMIC64_INIT(i) { (i) }
5088
5089 #define atomic_read(v) (*(volatile int *)&(v)->counter)
5090+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5091+{
5092+ return v->counter;
5093+}
5094 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
5095+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5096+{
5097+ return v->counter;
5098+}
5099
5100 #define atomic_set(v, i) (((v)->counter) = i)
5101+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5102+{
5103+ v->counter = i;
5104+}
5105 #define atomic64_set(v, i) (((v)->counter) = i)
5106+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5107+{
5108+ v->counter = i;
5109+}
5110
5111 extern void atomic_add(int, atomic_t *);
5112+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
5113 extern void atomic64_add(long, atomic64_t *);
5114+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
5115 extern void atomic_sub(int, atomic_t *);
5116+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
5117 extern void atomic64_sub(long, atomic64_t *);
5118+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
5119
5120 extern int atomic_add_ret(int, atomic_t *);
5121+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
5122 extern long atomic64_add_ret(long, atomic64_t *);
5123+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
5124 extern int atomic_sub_ret(int, atomic_t *);
5125 extern long atomic64_sub_ret(long, atomic64_t *);
5126
5127@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5128 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
5129
5130 #define atomic_inc_return(v) atomic_add_ret(1, v)
5131+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5132+{
5133+ return atomic_add_ret_unchecked(1, v);
5134+}
5135 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
5136+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5137+{
5138+ return atomic64_add_ret_unchecked(1, v);
5139+}
5140
5141 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
5142 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
5143
5144 #define atomic_add_return(i, v) atomic_add_ret(i, v)
5145+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5146+{
5147+ return atomic_add_ret_unchecked(i, v);
5148+}
5149 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
5150+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
5151+{
5152+ return atomic64_add_ret_unchecked(i, v);
5153+}
5154
5155 /*
5156 * atomic_inc_and_test - increment and test
5157@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5158 * other cases.
5159 */
5160 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5161+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5162+{
5163+ return atomic_inc_return_unchecked(v) == 0;
5164+}
5165 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
5166
5167 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
5168@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5169 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
5170
5171 #define atomic_inc(v) atomic_add(1, v)
5172+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
5173+{
5174+ atomic_add_unchecked(1, v);
5175+}
5176 #define atomic64_inc(v) atomic64_add(1, v)
5177+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
5178+{
5179+ atomic64_add_unchecked(1, v);
5180+}
5181
5182 #define atomic_dec(v) atomic_sub(1, v)
5183+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
5184+{
5185+ atomic_sub_unchecked(1, v);
5186+}
5187 #define atomic64_dec(v) atomic64_sub(1, v)
5188+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
5189+{
5190+ atomic64_sub_unchecked(1, v);
5191+}
5192
5193 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
5194 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
5195
5196 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5197+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
5198+{
5199+ return cmpxchg(&v->counter, old, new);
5200+}
5201 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
5202+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5203+{
5204+ return xchg(&v->counter, new);
5205+}
5206
5207 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5208 {
5209- int c, old;
5210+ int c, old, new;
5211 c = atomic_read(v);
5212 for (;;) {
5213- if (unlikely(c == (u)))
5214+ if (unlikely(c == u))
5215 break;
5216- old = atomic_cmpxchg((v), c, c + (a));
5217+
5218+ asm volatile("addcc %2, %0, %0\n"
5219+
5220+#ifdef CONFIG_PAX_REFCOUNT
5221+ "tvs %%icc, 6\n"
5222+#endif
5223+
5224+ : "=r" (new)
5225+ : "0" (c), "ir" (a)
5226+ : "cc");
5227+
5228+ old = atomic_cmpxchg(v, c, new);
5229 if (likely(old == c))
5230 break;
5231 c = old;
5232@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5233 #define atomic64_cmpxchg(v, o, n) \
5234 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
5235 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
5236+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
5237+{
5238+ return xchg(&v->counter, new);
5239+}
5240
5241 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5242 {
5243- long c, old;
5244+ long c, old, new;
5245 c = atomic64_read(v);
5246 for (;;) {
5247- if (unlikely(c == (u)))
5248+ if (unlikely(c == u))
5249 break;
5250- old = atomic64_cmpxchg((v), c, c + (a));
5251+
5252+ asm volatile("addcc %2, %0, %0\n"
5253+
5254+#ifdef CONFIG_PAX_REFCOUNT
5255+ "tvs %%xcc, 6\n"
5256+#endif
5257+
5258+ : "=r" (new)
5259+ : "0" (c), "ir" (a)
5260+ : "cc");
5261+
5262+ old = atomic64_cmpxchg(v, c, new);
5263 if (likely(old == c))
5264 break;
5265 c = old;
5266 }
5267- return c != (u);
5268+ return c != u;
5269 }
5270
5271 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5272diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
5273index 69358b5..9d0d492 100644
5274--- a/arch/sparc/include/asm/cache.h
5275+++ b/arch/sparc/include/asm/cache.h
5276@@ -7,10 +7,12 @@
5277 #ifndef _SPARC_CACHE_H
5278 #define _SPARC_CACHE_H
5279
5280+#include <linux/const.h>
5281+
5282 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
5283
5284 #define L1_CACHE_SHIFT 5
5285-#define L1_CACHE_BYTES 32
5286+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5287
5288 #ifdef CONFIG_SPARC32
5289 #define SMP_CACHE_BYTES_SHIFT 5
5290diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
5291index 4269ca6..e3da77f 100644
5292--- a/arch/sparc/include/asm/elf_32.h
5293+++ b/arch/sparc/include/asm/elf_32.h
5294@@ -114,6 +114,13 @@ typedef struct {
5295
5296 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
5297
5298+#ifdef CONFIG_PAX_ASLR
5299+#define PAX_ELF_ET_DYN_BASE 0x10000UL
5300+
5301+#define PAX_DELTA_MMAP_LEN 16
5302+#define PAX_DELTA_STACK_LEN 16
5303+#endif
5304+
5305 /* This yields a mask that user programs can use to figure out what
5306 instruction set this cpu supports. This can NOT be done in userspace
5307 on Sparc. */
5308diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
5309index 7df8b7f..4946269 100644
5310--- a/arch/sparc/include/asm/elf_64.h
5311+++ b/arch/sparc/include/asm/elf_64.h
5312@@ -180,6 +180,13 @@ typedef struct {
5313 #define ELF_ET_DYN_BASE 0x0000010000000000UL
5314 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
5315
5316+#ifdef CONFIG_PAX_ASLR
5317+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
5318+
5319+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
5320+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
5321+#endif
5322+
5323 extern unsigned long sparc64_elf_hwcap;
5324 #define ELF_HWCAP sparc64_elf_hwcap
5325
5326diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
5327index ca2b344..c6084f89 100644
5328--- a/arch/sparc/include/asm/pgalloc_32.h
5329+++ b/arch/sparc/include/asm/pgalloc_32.h
5330@@ -37,6 +37,7 @@ BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
5331 BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
5332 #define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
5333 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
5334+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
5335
5336 BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
5337 #define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
5338diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
5339index 40b2d7a..22a665b 100644
5340--- a/arch/sparc/include/asm/pgalloc_64.h
5341+++ b/arch/sparc/include/asm/pgalloc_64.h
5342@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
5343 }
5344
5345 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
5346+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
5347
5348 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5349 {
5350diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
5351index 3d71018..48a11c5 100644
5352--- a/arch/sparc/include/asm/pgtable_32.h
5353+++ b/arch/sparc/include/asm/pgtable_32.h
5354@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
5355 BTFIXUPDEF_INT(page_none)
5356 BTFIXUPDEF_INT(page_copy)
5357 BTFIXUPDEF_INT(page_readonly)
5358+
5359+#ifdef CONFIG_PAX_PAGEEXEC
5360+BTFIXUPDEF_INT(page_shared_noexec)
5361+BTFIXUPDEF_INT(page_copy_noexec)
5362+BTFIXUPDEF_INT(page_readonly_noexec)
5363+#endif
5364+
5365 BTFIXUPDEF_INT(page_kernel)
5366
5367 #define PMD_SHIFT SUN4C_PMD_SHIFT
5368@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
5369 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
5370 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
5371
5372+#ifdef CONFIG_PAX_PAGEEXEC
5373+extern pgprot_t PAGE_SHARED_NOEXEC;
5374+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
5375+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
5376+#else
5377+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5378+# define PAGE_COPY_NOEXEC PAGE_COPY
5379+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5380+#endif
5381+
5382 extern unsigned long page_kernel;
5383
5384 #ifdef MODULE
5385diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
5386index f6ae2b2..b03ffc7 100644
5387--- a/arch/sparc/include/asm/pgtsrmmu.h
5388+++ b/arch/sparc/include/asm/pgtsrmmu.h
5389@@ -115,6 +115,13 @@
5390 SRMMU_EXEC | SRMMU_REF)
5391 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
5392 SRMMU_EXEC | SRMMU_REF)
5393+
5394+#ifdef CONFIG_PAX_PAGEEXEC
5395+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
5396+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5397+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5398+#endif
5399+
5400 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
5401 SRMMU_DIRTY | SRMMU_REF)
5402
5403diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
5404index 9689176..63c18ea 100644
5405--- a/arch/sparc/include/asm/spinlock_64.h
5406+++ b/arch/sparc/include/asm/spinlock_64.h
5407@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
5408
5409 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
5410
5411-static void inline arch_read_lock(arch_rwlock_t *lock)
5412+static inline void arch_read_lock(arch_rwlock_t *lock)
5413 {
5414 unsigned long tmp1, tmp2;
5415
5416 __asm__ __volatile__ (
5417 "1: ldsw [%2], %0\n"
5418 " brlz,pn %0, 2f\n"
5419-"4: add %0, 1, %1\n"
5420+"4: addcc %0, 1, %1\n"
5421+
5422+#ifdef CONFIG_PAX_REFCOUNT
5423+" tvs %%icc, 6\n"
5424+#endif
5425+
5426 " cas [%2], %0, %1\n"
5427 " cmp %0, %1\n"
5428 " bne,pn %%icc, 1b\n"
5429@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
5430 " .previous"
5431 : "=&r" (tmp1), "=&r" (tmp2)
5432 : "r" (lock)
5433- : "memory");
5434+ : "memory", "cc");
5435 }
5436
5437-static int inline arch_read_trylock(arch_rwlock_t *lock)
5438+static inline int arch_read_trylock(arch_rwlock_t *lock)
5439 {
5440 int tmp1, tmp2;
5441
5442@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5443 "1: ldsw [%2], %0\n"
5444 " brlz,a,pn %0, 2f\n"
5445 " mov 0, %0\n"
5446-" add %0, 1, %1\n"
5447+" addcc %0, 1, %1\n"
5448+
5449+#ifdef CONFIG_PAX_REFCOUNT
5450+" tvs %%icc, 6\n"
5451+#endif
5452+
5453 " cas [%2], %0, %1\n"
5454 " cmp %0, %1\n"
5455 " bne,pn %%icc, 1b\n"
5456@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5457 return tmp1;
5458 }
5459
5460-static void inline arch_read_unlock(arch_rwlock_t *lock)
5461+static inline void arch_read_unlock(arch_rwlock_t *lock)
5462 {
5463 unsigned long tmp1, tmp2;
5464
5465 __asm__ __volatile__(
5466 "1: lduw [%2], %0\n"
5467-" sub %0, 1, %1\n"
5468+" subcc %0, 1, %1\n"
5469+
5470+#ifdef CONFIG_PAX_REFCOUNT
5471+" tvs %%icc, 6\n"
5472+#endif
5473+
5474 " cas [%2], %0, %1\n"
5475 " cmp %0, %1\n"
5476 " bne,pn %%xcc, 1b\n"
5477@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
5478 : "memory");
5479 }
5480
5481-static void inline arch_write_lock(arch_rwlock_t *lock)
5482+static inline void arch_write_lock(arch_rwlock_t *lock)
5483 {
5484 unsigned long mask, tmp1, tmp2;
5485
5486@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
5487 : "memory");
5488 }
5489
5490-static void inline arch_write_unlock(arch_rwlock_t *lock)
5491+static inline void arch_write_unlock(arch_rwlock_t *lock)
5492 {
5493 __asm__ __volatile__(
5494 " stw %%g0, [%0]"
5495@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
5496 : "memory");
5497 }
5498
5499-static int inline arch_write_trylock(arch_rwlock_t *lock)
5500+static inline int arch_write_trylock(arch_rwlock_t *lock)
5501 {
5502 unsigned long mask, tmp1, tmp2, result;
5503
5504diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5505index c2a1080..21ed218 100644
5506--- a/arch/sparc/include/asm/thread_info_32.h
5507+++ b/arch/sparc/include/asm/thread_info_32.h
5508@@ -50,6 +50,8 @@ struct thread_info {
5509 unsigned long w_saved;
5510
5511 struct restart_block restart_block;
5512+
5513+ unsigned long lowest_stack;
5514 };
5515
5516 /*
5517diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5518index 01d057f..13a7d2f 100644
5519--- a/arch/sparc/include/asm/thread_info_64.h
5520+++ b/arch/sparc/include/asm/thread_info_64.h
5521@@ -63,6 +63,8 @@ struct thread_info {
5522 struct pt_regs *kern_una_regs;
5523 unsigned int kern_una_insn;
5524
5525+ unsigned long lowest_stack;
5526+
5527 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5528 };
5529
5530@@ -214,10 +216,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
5531 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
5532 /* flag bit 6 is available */
5533 #define TIF_32BIT 7 /* 32-bit binary */
5534-/* flag bit 8 is available */
5535+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
5536 #define TIF_SECCOMP 9 /* secure computing */
5537 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
5538 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
5539+
5540 /* NOTE: Thread flags >= 12 should be ones we have no interest
5541 * in using in assembly, else we can't use the mask as
5542 * an immediate value in instructions such as andcc.
5543@@ -236,12 +239,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
5544 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
5545 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
5546 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
5547+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5548
5549 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
5550 _TIF_DO_NOTIFY_RESUME_MASK | \
5551 _TIF_NEED_RESCHED)
5552 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
5553
5554+#define _TIF_WORK_SYSCALL \
5555+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
5556+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
5557+
5558+
5559 /*
5560 * Thread-synchronous status.
5561 *
5562diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5563index e88fbe5..96b0ce5 100644
5564--- a/arch/sparc/include/asm/uaccess.h
5565+++ b/arch/sparc/include/asm/uaccess.h
5566@@ -1,5 +1,13 @@
5567 #ifndef ___ASM_SPARC_UACCESS_H
5568 #define ___ASM_SPARC_UACCESS_H
5569+
5570+#ifdef __KERNEL__
5571+#ifndef __ASSEMBLY__
5572+#include <linux/types.h>
5573+extern void check_object_size(const void *ptr, unsigned long n, bool to);
5574+#endif
5575+#endif
5576+
5577 #if defined(__sparc__) && defined(__arch64__)
5578 #include <asm/uaccess_64.h>
5579 #else
5580diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5581index 8303ac4..07f333d 100644
5582--- a/arch/sparc/include/asm/uaccess_32.h
5583+++ b/arch/sparc/include/asm/uaccess_32.h
5584@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5585
5586 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5587 {
5588- if (n && __access_ok((unsigned long) to, n))
5589+ if ((long)n < 0)
5590+ return n;
5591+
5592+ if (n && __access_ok((unsigned long) to, n)) {
5593+ if (!__builtin_constant_p(n))
5594+ check_object_size(from, n, true);
5595 return __copy_user(to, (__force void __user *) from, n);
5596- else
5597+ } else
5598 return n;
5599 }
5600
5601 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5602 {
5603+ if ((long)n < 0)
5604+ return n;
5605+
5606+ if (!__builtin_constant_p(n))
5607+ check_object_size(from, n, true);
5608+
5609 return __copy_user(to, (__force void __user *) from, n);
5610 }
5611
5612 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5613 {
5614- if (n && __access_ok((unsigned long) from, n))
5615+ if ((long)n < 0)
5616+ return n;
5617+
5618+ if (n && __access_ok((unsigned long) from, n)) {
5619+ if (!__builtin_constant_p(n))
5620+ check_object_size(to, n, false);
5621 return __copy_user((__force void __user *) to, from, n);
5622- else
5623+ } else
5624 return n;
5625 }
5626
5627 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5628 {
5629+ if ((long)n < 0)
5630+ return n;
5631+
5632 return __copy_user((__force void __user *) to, from, n);
5633 }
5634
5635diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5636index a1091afb..380228e 100644
5637--- a/arch/sparc/include/asm/uaccess_64.h
5638+++ b/arch/sparc/include/asm/uaccess_64.h
5639@@ -10,6 +10,7 @@
5640 #include <linux/compiler.h>
5641 #include <linux/string.h>
5642 #include <linux/thread_info.h>
5643+#include <linux/kernel.h>
5644 #include <asm/asi.h>
5645 #include <asm/spitfire.h>
5646 #include <asm-generic/uaccess-unaligned.h>
5647@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5648 static inline unsigned long __must_check
5649 copy_from_user(void *to, const void __user *from, unsigned long size)
5650 {
5651- unsigned long ret = ___copy_from_user(to, from, size);
5652+ unsigned long ret;
5653
5654+ if ((long)size < 0 || size > INT_MAX)
5655+ return size;
5656+
5657+ if (!__builtin_constant_p(size))
5658+ check_object_size(to, size, false);
5659+
5660+ ret = ___copy_from_user(to, from, size);
5661 if (unlikely(ret))
5662 ret = copy_from_user_fixup(to, from, size);
5663
5664@@ -229,8 +237,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5665 static inline unsigned long __must_check
5666 copy_to_user(void __user *to, const void *from, unsigned long size)
5667 {
5668- unsigned long ret = ___copy_to_user(to, from, size);
5669+ unsigned long ret;
5670
5671+ if ((long)size < 0 || size > INT_MAX)
5672+ return size;
5673+
5674+ if (!__builtin_constant_p(size))
5675+ check_object_size(from, size, true);
5676+
5677+ ret = ___copy_to_user(to, from, size);
5678 if (unlikely(ret))
5679 ret = copy_to_user_fixup(to, from, size);
5680 return ret;
5681diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5682index cb85458..e063f17 100644
5683--- a/arch/sparc/kernel/Makefile
5684+++ b/arch/sparc/kernel/Makefile
5685@@ -3,7 +3,7 @@
5686 #
5687
5688 asflags-y := -ansi
5689-ccflags-y := -Werror
5690+#ccflags-y := -Werror
5691
5692 extra-y := head_$(BITS).o
5693 extra-y += init_task.o
5694diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5695index efa0754..74b03fe 100644
5696--- a/arch/sparc/kernel/process_32.c
5697+++ b/arch/sparc/kernel/process_32.c
5698@@ -200,7 +200,7 @@ void __show_backtrace(unsigned long fp)
5699 rw->ins[4], rw->ins[5],
5700 rw->ins[6],
5701 rw->ins[7]);
5702- printk("%pS\n", (void *) rw->ins[7]);
5703+ printk("%pA\n", (void *) rw->ins[7]);
5704 rw = (struct reg_window32 *) rw->ins[6];
5705 }
5706 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5707@@ -267,14 +267,14 @@ void show_regs(struct pt_regs *r)
5708
5709 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5710 r->psr, r->pc, r->npc, r->y, print_tainted());
5711- printk("PC: <%pS>\n", (void *) r->pc);
5712+ printk("PC: <%pA>\n", (void *) r->pc);
5713 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5714 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5715 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5716 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5717 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5718 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5719- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5720+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5721
5722 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5723 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5724@@ -309,7 +309,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5725 rw = (struct reg_window32 *) fp;
5726 pc = rw->ins[7];
5727 printk("[%08lx : ", pc);
5728- printk("%pS ] ", (void *) pc);
5729+ printk("%pA ] ", (void *) pc);
5730 fp = rw->ins[6];
5731 } while (++count < 16);
5732 printk("\n");
5733diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5734index aff0c72..9067b39 100644
5735--- a/arch/sparc/kernel/process_64.c
5736+++ b/arch/sparc/kernel/process_64.c
5737@@ -179,14 +179,14 @@ static void show_regwindow(struct pt_regs *regs)
5738 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5739 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5740 if (regs->tstate & TSTATE_PRIV)
5741- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5742+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5743 }
5744
5745 void show_regs(struct pt_regs *regs)
5746 {
5747 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5748 regs->tpc, regs->tnpc, regs->y, print_tainted());
5749- printk("TPC: <%pS>\n", (void *) regs->tpc);
5750+ printk("TPC: <%pA>\n", (void *) regs->tpc);
5751 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5752 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5753 regs->u_regs[3]);
5754@@ -199,7 +199,7 @@ void show_regs(struct pt_regs *regs)
5755 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5756 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5757 regs->u_regs[15]);
5758- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5759+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5760 show_regwindow(regs);
5761 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5762 }
5763@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5764 ((tp && tp->task) ? tp->task->pid : -1));
5765
5766 if (gp->tstate & TSTATE_PRIV) {
5767- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5768+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5769 (void *) gp->tpc,
5770 (void *) gp->o7,
5771 (void *) gp->i7,
5772diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
5773index 6f97c07..b1300ec 100644
5774--- a/arch/sparc/kernel/ptrace_64.c
5775+++ b/arch/sparc/kernel/ptrace_64.c
5776@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
5777 return ret;
5778 }
5779
5780+#ifdef CONFIG_GRKERNSEC_SETXID
5781+extern void gr_delayed_cred_worker(void);
5782+#endif
5783+
5784 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5785 {
5786 int ret = 0;
5787@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5788 /* do the secure computing check first */
5789 secure_computing(regs->u_regs[UREG_G1]);
5790
5791+#ifdef CONFIG_GRKERNSEC_SETXID
5792+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5793+ gr_delayed_cred_worker();
5794+#endif
5795+
5796 if (test_thread_flag(TIF_SYSCALL_TRACE))
5797 ret = tracehook_report_syscall_entry(regs);
5798
5799@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5800
5801 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
5802 {
5803+#ifdef CONFIG_GRKERNSEC_SETXID
5804+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5805+ gr_delayed_cred_worker();
5806+#endif
5807+
5808 audit_syscall_exit(regs);
5809
5810 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
5811diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5812index 42b282f..28ce9f2 100644
5813--- a/arch/sparc/kernel/sys_sparc_32.c
5814+++ b/arch/sparc/kernel/sys_sparc_32.c
5815@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5816 if (ARCH_SUN4C && len > 0x20000000)
5817 return -ENOMEM;
5818 if (!addr)
5819- addr = TASK_UNMAPPED_BASE;
5820+ addr = current->mm->mmap_base;
5821
5822 if (flags & MAP_SHARED)
5823 addr = COLOUR_ALIGN(addr);
5824@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5825 }
5826 if (TASK_SIZE - PAGE_SIZE - len < addr)
5827 return -ENOMEM;
5828- if (!vmm || addr + len <= vmm->vm_start)
5829+ if (check_heap_stack_gap(vmm, addr, len))
5830 return addr;
5831 addr = vmm->vm_end;
5832 if (flags & MAP_SHARED)
5833diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5834index 3ee51f1..2ba4913 100644
5835--- a/arch/sparc/kernel/sys_sparc_64.c
5836+++ b/arch/sparc/kernel/sys_sparc_64.c
5837@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5838 /* We do not accept a shared mapping if it would violate
5839 * cache aliasing constraints.
5840 */
5841- if ((flags & MAP_SHARED) &&
5842+ if ((filp || (flags & MAP_SHARED)) &&
5843 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5844 return -EINVAL;
5845 return addr;
5846@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5847 if (filp || (flags & MAP_SHARED))
5848 do_color_align = 1;
5849
5850+#ifdef CONFIG_PAX_RANDMMAP
5851+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5852+#endif
5853+
5854 if (addr) {
5855 if (do_color_align)
5856 addr = COLOUR_ALIGN(addr, pgoff);
5857@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5858 addr = PAGE_ALIGN(addr);
5859
5860 vma = find_vma(mm, addr);
5861- if (task_size - len >= addr &&
5862- (!vma || addr + len <= vma->vm_start))
5863+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5864 return addr;
5865 }
5866
5867 if (len > mm->cached_hole_size) {
5868- start_addr = addr = mm->free_area_cache;
5869+ start_addr = addr = mm->free_area_cache;
5870 } else {
5871- start_addr = addr = TASK_UNMAPPED_BASE;
5872+ start_addr = addr = mm->mmap_base;
5873 mm->cached_hole_size = 0;
5874 }
5875
5876@@ -174,14 +177,14 @@ full_search:
5877 vma = find_vma(mm, VA_EXCLUDE_END);
5878 }
5879 if (unlikely(task_size < addr)) {
5880- if (start_addr != TASK_UNMAPPED_BASE) {
5881- start_addr = addr = TASK_UNMAPPED_BASE;
5882+ if (start_addr != mm->mmap_base) {
5883+ start_addr = addr = mm->mmap_base;
5884 mm->cached_hole_size = 0;
5885 goto full_search;
5886 }
5887 return -ENOMEM;
5888 }
5889- if (likely(!vma || addr + len <= vma->vm_start)) {
5890+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5891 /*
5892 * Remember the place where we stopped the search:
5893 */
5894@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5895 /* We do not accept a shared mapping if it would violate
5896 * cache aliasing constraints.
5897 */
5898- if ((flags & MAP_SHARED) &&
5899+ if ((filp || (flags & MAP_SHARED)) &&
5900 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5901 return -EINVAL;
5902 return addr;
5903@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5904 addr = PAGE_ALIGN(addr);
5905
5906 vma = find_vma(mm, addr);
5907- if (task_size - len >= addr &&
5908- (!vma || addr + len <= vma->vm_start))
5909+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5910 return addr;
5911 }
5912
5913@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5914 /* make sure it can fit in the remaining address space */
5915 if (likely(addr > len)) {
5916 vma = find_vma(mm, addr-len);
5917- if (!vma || addr <= vma->vm_start) {
5918+ if (check_heap_stack_gap(vma, addr - len, len)) {
5919 /* remember the address as a hint for next time */
5920 return (mm->free_area_cache = addr-len);
5921 }
5922@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5923 if (unlikely(mm->mmap_base < len))
5924 goto bottomup;
5925
5926- addr = mm->mmap_base-len;
5927- if (do_color_align)
5928- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5929+ addr = mm->mmap_base - len;
5930
5931 do {
5932+ if (do_color_align)
5933+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5934 /*
5935 * Lookup failure means no vma is above this address,
5936 * else if new region fits below vma->vm_start,
5937 * return with success:
5938 */
5939 vma = find_vma(mm, addr);
5940- if (likely(!vma || addr+len <= vma->vm_start)) {
5941+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5942 /* remember the address as a hint for next time */
5943 return (mm->free_area_cache = addr);
5944 }
5945@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5946 mm->cached_hole_size = vma->vm_start - addr;
5947
5948 /* try just below the current vma->vm_start */
5949- addr = vma->vm_start-len;
5950- if (do_color_align)
5951- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5952- } while (likely(len < vma->vm_start));
5953+ addr = skip_heap_stack_gap(vma, len);
5954+ } while (!IS_ERR_VALUE(addr));
5955
5956 bottomup:
5957 /*
5958@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5959 gap == RLIM_INFINITY ||
5960 sysctl_legacy_va_layout) {
5961 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5962+
5963+#ifdef CONFIG_PAX_RANDMMAP
5964+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5965+ mm->mmap_base += mm->delta_mmap;
5966+#endif
5967+
5968 mm->get_unmapped_area = arch_get_unmapped_area;
5969 mm->unmap_area = arch_unmap_area;
5970 } else {
5971@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5972 gap = (task_size / 6 * 5);
5973
5974 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5975+
5976+#ifdef CONFIG_PAX_RANDMMAP
5977+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5978+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5979+#endif
5980+
5981 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5982 mm->unmap_area = arch_unmap_area_topdown;
5983 }
5984diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
5985index 1d7e274..b39c527 100644
5986--- a/arch/sparc/kernel/syscalls.S
5987+++ b/arch/sparc/kernel/syscalls.S
5988@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
5989 #endif
5990 .align 32
5991 1: ldx [%g6 + TI_FLAGS], %l5
5992- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
5993+ andcc %l5, _TIF_WORK_SYSCALL, %g0
5994 be,pt %icc, rtrap
5995 nop
5996 call syscall_trace_leave
5997@@ -179,7 +179,7 @@ linux_sparc_syscall32:
5998
5999 srl %i5, 0, %o5 ! IEU1
6000 srl %i2, 0, %o2 ! IEU0 Group
6001- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6002+ andcc %l0, _TIF_WORK_SYSCALL, %g0
6003 bne,pn %icc, linux_syscall_trace32 ! CTI
6004 mov %i0, %l5 ! IEU1
6005 call %l7 ! CTI Group brk forced
6006@@ -202,7 +202,7 @@ linux_sparc_syscall:
6007
6008 mov %i3, %o3 ! IEU1
6009 mov %i4, %o4 ! IEU0 Group
6010- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6011+ andcc %l0, _TIF_WORK_SYSCALL, %g0
6012 bne,pn %icc, linux_syscall_trace ! CTI Group
6013 mov %i0, %l5 ! IEU0
6014 2: call %l7 ! CTI Group brk forced
6015@@ -226,7 +226,7 @@ ret_sys_call:
6016
6017 cmp %o0, -ERESTART_RESTARTBLOCK
6018 bgeu,pn %xcc, 1f
6019- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
6020+ andcc %l0, _TIF_WORK_SYSCALL, %l6
6021 80:
6022 /* System call success, clear Carry condition code. */
6023 andn %g3, %g2, %g3
6024@@ -241,7 +241,7 @@ ret_sys_call:
6025 /* System call failure, set Carry condition code.
6026 * Also, get abs(errno) to return to the process.
6027 */
6028- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
6029+ andcc %l0, _TIF_WORK_SYSCALL, %l6
6030 sub %g0, %o0, %o0
6031 or %g3, %g2, %g3
6032 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
6033diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
6034index d2de213..6b22bc3 100644
6035--- a/arch/sparc/kernel/traps_32.c
6036+++ b/arch/sparc/kernel/traps_32.c
6037@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
6038 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
6039 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
6040
6041+extern void gr_handle_kernel_exploit(void);
6042+
6043 void die_if_kernel(char *str, struct pt_regs *regs)
6044 {
6045 static int die_counter;
6046@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6047 count++ < 30 &&
6048 (((unsigned long) rw) >= PAGE_OFFSET) &&
6049 !(((unsigned long) rw) & 0x7)) {
6050- printk("Caller[%08lx]: %pS\n", rw->ins[7],
6051+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
6052 (void *) rw->ins[7]);
6053 rw = (struct reg_window32 *)rw->ins[6];
6054 }
6055 }
6056 printk("Instruction DUMP:");
6057 instruction_dump ((unsigned long *) regs->pc);
6058- if(regs->psr & PSR_PS)
6059+ if(regs->psr & PSR_PS) {
6060+ gr_handle_kernel_exploit();
6061 do_exit(SIGKILL);
6062+ }
6063 do_exit(SIGSEGV);
6064 }
6065
6066diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
6067index c72fdf5..743a344 100644
6068--- a/arch/sparc/kernel/traps_64.c
6069+++ b/arch/sparc/kernel/traps_64.c
6070@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
6071 i + 1,
6072 p->trapstack[i].tstate, p->trapstack[i].tpc,
6073 p->trapstack[i].tnpc, p->trapstack[i].tt);
6074- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
6075+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
6076 }
6077 }
6078
6079@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
6080
6081 lvl -= 0x100;
6082 if (regs->tstate & TSTATE_PRIV) {
6083+
6084+#ifdef CONFIG_PAX_REFCOUNT
6085+ if (lvl == 6)
6086+ pax_report_refcount_overflow(regs);
6087+#endif
6088+
6089 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
6090 die_if_kernel(buffer, regs);
6091 }
6092@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
6093 void bad_trap_tl1(struct pt_regs *regs, long lvl)
6094 {
6095 char buffer[32];
6096-
6097+
6098 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
6099 0, lvl, SIGTRAP) == NOTIFY_STOP)
6100 return;
6101
6102+#ifdef CONFIG_PAX_REFCOUNT
6103+ if (lvl == 6)
6104+ pax_report_refcount_overflow(regs);
6105+#endif
6106+
6107 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6108
6109 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
6110@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
6111 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
6112 printk("%s" "ERROR(%d): ",
6113 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
6114- printk("TPC<%pS>\n", (void *) regs->tpc);
6115+ printk("TPC<%pA>\n", (void *) regs->tpc);
6116 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
6117 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
6118 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
6119@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6120 smp_processor_id(),
6121 (type & 0x1) ? 'I' : 'D',
6122 regs->tpc);
6123- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
6124+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
6125 panic("Irrecoverable Cheetah+ parity error.");
6126 }
6127
6128@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6129 smp_processor_id(),
6130 (type & 0x1) ? 'I' : 'D',
6131 regs->tpc);
6132- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
6133+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
6134 }
6135
6136 struct sun4v_error_entry {
6137@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
6138
6139 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
6140 regs->tpc, tl);
6141- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
6142+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
6143 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6144- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
6145+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
6146 (void *) regs->u_regs[UREG_I7]);
6147 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
6148 "pte[%lx] error[%lx]\n",
6149@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
6150
6151 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
6152 regs->tpc, tl);
6153- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
6154+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
6155 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6156- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
6157+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
6158 (void *) regs->u_regs[UREG_I7]);
6159 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
6160 "pte[%lx] error[%lx]\n",
6161@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6162 fp = (unsigned long)sf->fp + STACK_BIAS;
6163 }
6164
6165- printk(" [%016lx] %pS\n", pc, (void *) pc);
6166+ printk(" [%016lx] %pA\n", pc, (void *) pc);
6167 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6168 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
6169 int index = tsk->curr_ret_stack;
6170 if (tsk->ret_stack && index >= graph) {
6171 pc = tsk->ret_stack[index - graph].ret;
6172- printk(" [%016lx] %pS\n", pc, (void *) pc);
6173+ printk(" [%016lx] %pA\n", pc, (void *) pc);
6174 graph++;
6175 }
6176 }
6177@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
6178 return (struct reg_window *) (fp + STACK_BIAS);
6179 }
6180
6181+extern void gr_handle_kernel_exploit(void);
6182+
6183 void die_if_kernel(char *str, struct pt_regs *regs)
6184 {
6185 static int die_counter;
6186@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6187 while (rw &&
6188 count++ < 30 &&
6189 kstack_valid(tp, (unsigned long) rw)) {
6190- printk("Caller[%016lx]: %pS\n", rw->ins[7],
6191+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
6192 (void *) rw->ins[7]);
6193
6194 rw = kernel_stack_up(rw);
6195@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6196 }
6197 user_instruction_dump ((unsigned int __user *) regs->tpc);
6198 }
6199- if (regs->tstate & TSTATE_PRIV)
6200+ if (regs->tstate & TSTATE_PRIV) {
6201+ gr_handle_kernel_exploit();
6202 do_exit(SIGKILL);
6203+ }
6204 do_exit(SIGSEGV);
6205 }
6206 EXPORT_SYMBOL(die_if_kernel);
6207diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
6208index dae85bc..af1e19d 100644
6209--- a/arch/sparc/kernel/unaligned_64.c
6210+++ b/arch/sparc/kernel/unaligned_64.c
6211@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
6212 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
6213
6214 if (__ratelimit(&ratelimit)) {
6215- printk("Kernel unaligned access at TPC[%lx] %pS\n",
6216+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
6217 regs->tpc, (void *) regs->tpc);
6218 }
6219 }
6220diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
6221index a3fc437..fea9957 100644
6222--- a/arch/sparc/lib/Makefile
6223+++ b/arch/sparc/lib/Makefile
6224@@ -2,7 +2,7 @@
6225 #
6226
6227 asflags-y := -ansi -DST_DIV0=0x02
6228-ccflags-y := -Werror
6229+#ccflags-y := -Werror
6230
6231 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
6232 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
6233diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
6234index 59186e0..f747d7a 100644
6235--- a/arch/sparc/lib/atomic_64.S
6236+++ b/arch/sparc/lib/atomic_64.S
6237@@ -18,7 +18,12 @@
6238 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6239 BACKOFF_SETUP(%o2)
6240 1: lduw [%o1], %g1
6241- add %g1, %o0, %g7
6242+ addcc %g1, %o0, %g7
6243+
6244+#ifdef CONFIG_PAX_REFCOUNT
6245+ tvs %icc, 6
6246+#endif
6247+
6248 cas [%o1], %g1, %g7
6249 cmp %g1, %g7
6250 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6251@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6252 2: BACKOFF_SPIN(%o2, %o3, 1b)
6253 .size atomic_add, .-atomic_add
6254
6255+ .globl atomic_add_unchecked
6256+ .type atomic_add_unchecked,#function
6257+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6258+ BACKOFF_SETUP(%o2)
6259+1: lduw [%o1], %g1
6260+ add %g1, %o0, %g7
6261+ cas [%o1], %g1, %g7
6262+ cmp %g1, %g7
6263+ bne,pn %icc, 2f
6264+ nop
6265+ retl
6266+ nop
6267+2: BACKOFF_SPIN(%o2, %o3, 1b)
6268+ .size atomic_add_unchecked, .-atomic_add_unchecked
6269+
6270 .globl atomic_sub
6271 .type atomic_sub,#function
6272 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6273 BACKOFF_SETUP(%o2)
6274 1: lduw [%o1], %g1
6275- sub %g1, %o0, %g7
6276+ subcc %g1, %o0, %g7
6277+
6278+#ifdef CONFIG_PAX_REFCOUNT
6279+ tvs %icc, 6
6280+#endif
6281+
6282 cas [%o1], %g1, %g7
6283 cmp %g1, %g7
6284 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6285@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6286 2: BACKOFF_SPIN(%o2, %o3, 1b)
6287 .size atomic_sub, .-atomic_sub
6288
6289+ .globl atomic_sub_unchecked
6290+ .type atomic_sub_unchecked,#function
6291+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6292+ BACKOFF_SETUP(%o2)
6293+1: lduw [%o1], %g1
6294+ sub %g1, %o0, %g7
6295+ cas [%o1], %g1, %g7
6296+ cmp %g1, %g7
6297+ bne,pn %icc, 2f
6298+ nop
6299+ retl
6300+ nop
6301+2: BACKOFF_SPIN(%o2, %o3, 1b)
6302+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
6303+
6304 .globl atomic_add_ret
6305 .type atomic_add_ret,#function
6306 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6307 BACKOFF_SETUP(%o2)
6308 1: lduw [%o1], %g1
6309- add %g1, %o0, %g7
6310+ addcc %g1, %o0, %g7
6311+
6312+#ifdef CONFIG_PAX_REFCOUNT
6313+ tvs %icc, 6
6314+#endif
6315+
6316 cas [%o1], %g1, %g7
6317 cmp %g1, %g7
6318 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6319@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6320 2: BACKOFF_SPIN(%o2, %o3, 1b)
6321 .size atomic_add_ret, .-atomic_add_ret
6322
6323+ .globl atomic_add_ret_unchecked
6324+ .type atomic_add_ret_unchecked,#function
6325+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6326+ BACKOFF_SETUP(%o2)
6327+1: lduw [%o1], %g1
6328+ addcc %g1, %o0, %g7
6329+ cas [%o1], %g1, %g7
6330+ cmp %g1, %g7
6331+ bne,pn %icc, 2f
6332+ add %g7, %o0, %g7
6333+ sra %g7, 0, %o0
6334+ retl
6335+ nop
6336+2: BACKOFF_SPIN(%o2, %o3, 1b)
6337+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
6338+
6339 .globl atomic_sub_ret
6340 .type atomic_sub_ret,#function
6341 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6342 BACKOFF_SETUP(%o2)
6343 1: lduw [%o1], %g1
6344- sub %g1, %o0, %g7
6345+ subcc %g1, %o0, %g7
6346+
6347+#ifdef CONFIG_PAX_REFCOUNT
6348+ tvs %icc, 6
6349+#endif
6350+
6351 cas [%o1], %g1, %g7
6352 cmp %g1, %g7
6353 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6354@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6355 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6356 BACKOFF_SETUP(%o2)
6357 1: ldx [%o1], %g1
6358- add %g1, %o0, %g7
6359+ addcc %g1, %o0, %g7
6360+
6361+#ifdef CONFIG_PAX_REFCOUNT
6362+ tvs %xcc, 6
6363+#endif
6364+
6365 casx [%o1], %g1, %g7
6366 cmp %g1, %g7
6367 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6368@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6369 2: BACKOFF_SPIN(%o2, %o3, 1b)
6370 .size atomic64_add, .-atomic64_add
6371
6372+ .globl atomic64_add_unchecked
6373+ .type atomic64_add_unchecked,#function
6374+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6375+ BACKOFF_SETUP(%o2)
6376+1: ldx [%o1], %g1
6377+ addcc %g1, %o0, %g7
6378+ casx [%o1], %g1, %g7
6379+ cmp %g1, %g7
6380+ bne,pn %xcc, 2f
6381+ nop
6382+ retl
6383+ nop
6384+2: BACKOFF_SPIN(%o2, %o3, 1b)
6385+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
6386+
6387 .globl atomic64_sub
6388 .type atomic64_sub,#function
6389 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6390 BACKOFF_SETUP(%o2)
6391 1: ldx [%o1], %g1
6392- sub %g1, %o0, %g7
6393+ subcc %g1, %o0, %g7
6394+
6395+#ifdef CONFIG_PAX_REFCOUNT
6396+ tvs %xcc, 6
6397+#endif
6398+
6399 casx [%o1], %g1, %g7
6400 cmp %g1, %g7
6401 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6402@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6403 2: BACKOFF_SPIN(%o2, %o3, 1b)
6404 .size atomic64_sub, .-atomic64_sub
6405
6406+ .globl atomic64_sub_unchecked
6407+ .type atomic64_sub_unchecked,#function
6408+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6409+ BACKOFF_SETUP(%o2)
6410+1: ldx [%o1], %g1
6411+ subcc %g1, %o0, %g7
6412+ casx [%o1], %g1, %g7
6413+ cmp %g1, %g7
6414+ bne,pn %xcc, 2f
6415+ nop
6416+ retl
6417+ nop
6418+2: BACKOFF_SPIN(%o2, %o3, 1b)
6419+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
6420+
6421 .globl atomic64_add_ret
6422 .type atomic64_add_ret,#function
6423 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6424 BACKOFF_SETUP(%o2)
6425 1: ldx [%o1], %g1
6426- add %g1, %o0, %g7
6427+ addcc %g1, %o0, %g7
6428+
6429+#ifdef CONFIG_PAX_REFCOUNT
6430+ tvs %xcc, 6
6431+#endif
6432+
6433 casx [%o1], %g1, %g7
6434 cmp %g1, %g7
6435 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6436@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6437 2: BACKOFF_SPIN(%o2, %o3, 1b)
6438 .size atomic64_add_ret, .-atomic64_add_ret
6439
6440+ .globl atomic64_add_ret_unchecked
6441+ .type atomic64_add_ret_unchecked,#function
6442+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6443+ BACKOFF_SETUP(%o2)
6444+1: ldx [%o1], %g1
6445+ addcc %g1, %o0, %g7
6446+ casx [%o1], %g1, %g7
6447+ cmp %g1, %g7
6448+ bne,pn %xcc, 2f
6449+ add %g7, %o0, %g7
6450+ mov %g7, %o0
6451+ retl
6452+ nop
6453+2: BACKOFF_SPIN(%o2, %o3, 1b)
6454+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
6455+
6456 .globl atomic64_sub_ret
6457 .type atomic64_sub_ret,#function
6458 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6459 BACKOFF_SETUP(%o2)
6460 1: ldx [%o1], %g1
6461- sub %g1, %o0, %g7
6462+ subcc %g1, %o0, %g7
6463+
6464+#ifdef CONFIG_PAX_REFCOUNT
6465+ tvs %xcc, 6
6466+#endif
6467+
6468 casx [%o1], %g1, %g7
6469 cmp %g1, %g7
6470 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6471diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
6472index f73c224..662af10 100644
6473--- a/arch/sparc/lib/ksyms.c
6474+++ b/arch/sparc/lib/ksyms.c
6475@@ -136,12 +136,18 @@ EXPORT_SYMBOL(__downgrade_write);
6476
6477 /* Atomic counter implementation. */
6478 EXPORT_SYMBOL(atomic_add);
6479+EXPORT_SYMBOL(atomic_add_unchecked);
6480 EXPORT_SYMBOL(atomic_add_ret);
6481+EXPORT_SYMBOL(atomic_add_ret_unchecked);
6482 EXPORT_SYMBOL(atomic_sub);
6483+EXPORT_SYMBOL(atomic_sub_unchecked);
6484 EXPORT_SYMBOL(atomic_sub_ret);
6485 EXPORT_SYMBOL(atomic64_add);
6486+EXPORT_SYMBOL(atomic64_add_unchecked);
6487 EXPORT_SYMBOL(atomic64_add_ret);
6488+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
6489 EXPORT_SYMBOL(atomic64_sub);
6490+EXPORT_SYMBOL(atomic64_sub_unchecked);
6491 EXPORT_SYMBOL(atomic64_sub_ret);
6492
6493 /* Atomic bit operations. */
6494diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6495index 301421c..e2535d1 100644
6496--- a/arch/sparc/mm/Makefile
6497+++ b/arch/sparc/mm/Makefile
6498@@ -2,7 +2,7 @@
6499 #
6500
6501 asflags-y := -ansi
6502-ccflags-y := -Werror
6503+#ccflags-y := -Werror
6504
6505 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
6506 obj-y += fault_$(BITS).o
6507diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6508index df3155a..eb708b8 100644
6509--- a/arch/sparc/mm/fault_32.c
6510+++ b/arch/sparc/mm/fault_32.c
6511@@ -21,6 +21,9 @@
6512 #include <linux/perf_event.h>
6513 #include <linux/interrupt.h>
6514 #include <linux/kdebug.h>
6515+#include <linux/slab.h>
6516+#include <linux/pagemap.h>
6517+#include <linux/compiler.h>
6518
6519 #include <asm/page.h>
6520 #include <asm/pgtable.h>
6521@@ -207,6 +210,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6522 return safe_compute_effective_address(regs, insn);
6523 }
6524
6525+#ifdef CONFIG_PAX_PAGEEXEC
6526+#ifdef CONFIG_PAX_DLRESOLVE
6527+static void pax_emuplt_close(struct vm_area_struct *vma)
6528+{
6529+ vma->vm_mm->call_dl_resolve = 0UL;
6530+}
6531+
6532+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6533+{
6534+ unsigned int *kaddr;
6535+
6536+ vmf->page = alloc_page(GFP_HIGHUSER);
6537+ if (!vmf->page)
6538+ return VM_FAULT_OOM;
6539+
6540+ kaddr = kmap(vmf->page);
6541+ memset(kaddr, 0, PAGE_SIZE);
6542+ kaddr[0] = 0x9DE3BFA8U; /* save */
6543+ flush_dcache_page(vmf->page);
6544+ kunmap(vmf->page);
6545+ return VM_FAULT_MAJOR;
6546+}
6547+
6548+static const struct vm_operations_struct pax_vm_ops = {
6549+ .close = pax_emuplt_close,
6550+ .fault = pax_emuplt_fault
6551+};
6552+
6553+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6554+{
6555+ int ret;
6556+
6557+ INIT_LIST_HEAD(&vma->anon_vma_chain);
6558+ vma->vm_mm = current->mm;
6559+ vma->vm_start = addr;
6560+ vma->vm_end = addr + PAGE_SIZE;
6561+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6562+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6563+ vma->vm_ops = &pax_vm_ops;
6564+
6565+ ret = insert_vm_struct(current->mm, vma);
6566+ if (ret)
6567+ return ret;
6568+
6569+ ++current->mm->total_vm;
6570+ return 0;
6571+}
6572+#endif
6573+
6574+/*
6575+ * PaX: decide what to do with offenders (regs->pc = fault address)
6576+ *
6577+ * returns 1 when task should be killed
6578+ * 2 when patched PLT trampoline was detected
6579+ * 3 when unpatched PLT trampoline was detected
6580+ */
6581+static int pax_handle_fetch_fault(struct pt_regs *regs)
6582+{
6583+
6584+#ifdef CONFIG_PAX_EMUPLT
6585+ int err;
6586+
6587+ do { /* PaX: patched PLT emulation #1 */
6588+ unsigned int sethi1, sethi2, jmpl;
6589+
6590+ err = get_user(sethi1, (unsigned int *)regs->pc);
6591+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6592+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6593+
6594+ if (err)
6595+ break;
6596+
6597+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6598+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6599+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6600+ {
6601+ unsigned int addr;
6602+
6603+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6604+ addr = regs->u_regs[UREG_G1];
6605+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6606+ regs->pc = addr;
6607+ regs->npc = addr+4;
6608+ return 2;
6609+ }
6610+ } while (0);
6611+
6612+ { /* PaX: patched PLT emulation #2 */
6613+ unsigned int ba;
6614+
6615+ err = get_user(ba, (unsigned int *)regs->pc);
6616+
6617+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6618+ unsigned int addr;
6619+
6620+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6621+ regs->pc = addr;
6622+ regs->npc = addr+4;
6623+ return 2;
6624+ }
6625+ }
6626+
6627+ do { /* PaX: patched PLT emulation #3 */
6628+ unsigned int sethi, jmpl, nop;
6629+
6630+ err = get_user(sethi, (unsigned int *)regs->pc);
6631+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6632+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6633+
6634+ if (err)
6635+ break;
6636+
6637+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6638+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6639+ nop == 0x01000000U)
6640+ {
6641+ unsigned int addr;
6642+
6643+ addr = (sethi & 0x003FFFFFU) << 10;
6644+ regs->u_regs[UREG_G1] = addr;
6645+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6646+ regs->pc = addr;
6647+ regs->npc = addr+4;
6648+ return 2;
6649+ }
6650+ } while (0);
6651+
6652+ do { /* PaX: unpatched PLT emulation step 1 */
6653+ unsigned int sethi, ba, nop;
6654+
6655+ err = get_user(sethi, (unsigned int *)regs->pc);
6656+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
6657+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6658+
6659+ if (err)
6660+ break;
6661+
6662+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6663+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6664+ nop == 0x01000000U)
6665+ {
6666+ unsigned int addr, save, call;
6667+
6668+ if ((ba & 0xFFC00000U) == 0x30800000U)
6669+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6670+ else
6671+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6672+
6673+ err = get_user(save, (unsigned int *)addr);
6674+ err |= get_user(call, (unsigned int *)(addr+4));
6675+ err |= get_user(nop, (unsigned int *)(addr+8));
6676+ if (err)
6677+ break;
6678+
6679+#ifdef CONFIG_PAX_DLRESOLVE
6680+ if (save == 0x9DE3BFA8U &&
6681+ (call & 0xC0000000U) == 0x40000000U &&
6682+ nop == 0x01000000U)
6683+ {
6684+ struct vm_area_struct *vma;
6685+ unsigned long call_dl_resolve;
6686+
6687+ down_read(&current->mm->mmap_sem);
6688+ call_dl_resolve = current->mm->call_dl_resolve;
6689+ up_read(&current->mm->mmap_sem);
6690+ if (likely(call_dl_resolve))
6691+ goto emulate;
6692+
6693+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6694+
6695+ down_write(&current->mm->mmap_sem);
6696+ if (current->mm->call_dl_resolve) {
6697+ call_dl_resolve = current->mm->call_dl_resolve;
6698+ up_write(&current->mm->mmap_sem);
6699+ if (vma)
6700+ kmem_cache_free(vm_area_cachep, vma);
6701+ goto emulate;
6702+ }
6703+
6704+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6705+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6706+ up_write(&current->mm->mmap_sem);
6707+ if (vma)
6708+ kmem_cache_free(vm_area_cachep, vma);
6709+ return 1;
6710+ }
6711+
6712+ if (pax_insert_vma(vma, call_dl_resolve)) {
6713+ up_write(&current->mm->mmap_sem);
6714+ kmem_cache_free(vm_area_cachep, vma);
6715+ return 1;
6716+ }
6717+
6718+ current->mm->call_dl_resolve = call_dl_resolve;
6719+ up_write(&current->mm->mmap_sem);
6720+
6721+emulate:
6722+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6723+ regs->pc = call_dl_resolve;
6724+ regs->npc = addr+4;
6725+ return 3;
6726+ }
6727+#endif
6728+
6729+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6730+ if ((save & 0xFFC00000U) == 0x05000000U &&
6731+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6732+ nop == 0x01000000U)
6733+ {
6734+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6735+ regs->u_regs[UREG_G2] = addr + 4;
6736+ addr = (save & 0x003FFFFFU) << 10;
6737+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6738+ regs->pc = addr;
6739+ regs->npc = addr+4;
6740+ return 3;
6741+ }
6742+ }
6743+ } while (0);
6744+
6745+ do { /* PaX: unpatched PLT emulation step 2 */
6746+ unsigned int save, call, nop;
6747+
6748+ err = get_user(save, (unsigned int *)(regs->pc-4));
6749+ err |= get_user(call, (unsigned int *)regs->pc);
6750+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
6751+ if (err)
6752+ break;
6753+
6754+ if (save == 0x9DE3BFA8U &&
6755+ (call & 0xC0000000U) == 0x40000000U &&
6756+ nop == 0x01000000U)
6757+ {
6758+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6759+
6760+ regs->u_regs[UREG_RETPC] = regs->pc;
6761+ regs->pc = dl_resolve;
6762+ regs->npc = dl_resolve+4;
6763+ return 3;
6764+ }
6765+ } while (0);
6766+#endif
6767+
6768+ return 1;
6769+}
6770+
6771+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6772+{
6773+ unsigned long i;
6774+
6775+ printk(KERN_ERR "PAX: bytes at PC: ");
6776+ for (i = 0; i < 8; i++) {
6777+ unsigned int c;
6778+ if (get_user(c, (unsigned int *)pc+i))
6779+ printk(KERN_CONT "???????? ");
6780+ else
6781+ printk(KERN_CONT "%08x ", c);
6782+ }
6783+ printk("\n");
6784+}
6785+#endif
6786+
6787 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6788 int text_fault)
6789 {
6790@@ -282,6 +547,24 @@ good_area:
6791 if(!(vma->vm_flags & VM_WRITE))
6792 goto bad_area;
6793 } else {
6794+
6795+#ifdef CONFIG_PAX_PAGEEXEC
6796+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6797+ up_read(&mm->mmap_sem);
6798+ switch (pax_handle_fetch_fault(regs)) {
6799+
6800+#ifdef CONFIG_PAX_EMUPLT
6801+ case 2:
6802+ case 3:
6803+ return;
6804+#endif
6805+
6806+ }
6807+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6808+ do_group_exit(SIGKILL);
6809+ }
6810+#endif
6811+
6812 /* Allow reads even for write-only mappings */
6813 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6814 goto bad_area;
6815diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6816index 1fe0429..aee2e87 100644
6817--- a/arch/sparc/mm/fault_64.c
6818+++ b/arch/sparc/mm/fault_64.c
6819@@ -21,6 +21,9 @@
6820 #include <linux/kprobes.h>
6821 #include <linux/kdebug.h>
6822 #include <linux/percpu.h>
6823+#include <linux/slab.h>
6824+#include <linux/pagemap.h>
6825+#include <linux/compiler.h>
6826
6827 #include <asm/page.h>
6828 #include <asm/pgtable.h>
6829@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6830 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6831 regs->tpc);
6832 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6833- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6834+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6835 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6836 dump_stack();
6837 unhandled_fault(regs->tpc, current, regs);
6838@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6839 show_regs(regs);
6840 }
6841
6842+#ifdef CONFIG_PAX_PAGEEXEC
6843+#ifdef CONFIG_PAX_DLRESOLVE
6844+static void pax_emuplt_close(struct vm_area_struct *vma)
6845+{
6846+ vma->vm_mm->call_dl_resolve = 0UL;
6847+}
6848+
6849+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6850+{
6851+ unsigned int *kaddr;
6852+
6853+ vmf->page = alloc_page(GFP_HIGHUSER);
6854+ if (!vmf->page)
6855+ return VM_FAULT_OOM;
6856+
6857+ kaddr = kmap(vmf->page);
6858+ memset(kaddr, 0, PAGE_SIZE);
6859+ kaddr[0] = 0x9DE3BFA8U; /* save */
6860+ flush_dcache_page(vmf->page);
6861+ kunmap(vmf->page);
6862+ return VM_FAULT_MAJOR;
6863+}
6864+
6865+static const struct vm_operations_struct pax_vm_ops = {
6866+ .close = pax_emuplt_close,
6867+ .fault = pax_emuplt_fault
6868+};
6869+
6870+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6871+{
6872+ int ret;
6873+
6874+ INIT_LIST_HEAD(&vma->anon_vma_chain);
6875+ vma->vm_mm = current->mm;
6876+ vma->vm_start = addr;
6877+ vma->vm_end = addr + PAGE_SIZE;
6878+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6879+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6880+ vma->vm_ops = &pax_vm_ops;
6881+
6882+ ret = insert_vm_struct(current->mm, vma);
6883+ if (ret)
6884+ return ret;
6885+
6886+ ++current->mm->total_vm;
6887+ return 0;
6888+}
6889+#endif
6890+
6891+/*
6892+ * PaX: decide what to do with offenders (regs->tpc = fault address)
6893+ *
6894+ * returns 1 when task should be killed
6895+ * 2 when patched PLT trampoline was detected
6896+ * 3 when unpatched PLT trampoline was detected
6897+ */
6898+static int pax_handle_fetch_fault(struct pt_regs *regs)
6899+{
6900+
6901+#ifdef CONFIG_PAX_EMUPLT
6902+ int err;
6903+
6904+ do { /* PaX: patched PLT emulation #1 */
6905+ unsigned int sethi1, sethi2, jmpl;
6906+
6907+ err = get_user(sethi1, (unsigned int *)regs->tpc);
6908+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6909+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6910+
6911+ if (err)
6912+ break;
6913+
6914+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6915+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6916+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6917+ {
6918+ unsigned long addr;
6919+
6920+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6921+ addr = regs->u_regs[UREG_G1];
6922+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6923+
6924+ if (test_thread_flag(TIF_32BIT))
6925+ addr &= 0xFFFFFFFFUL;
6926+
6927+ regs->tpc = addr;
6928+ regs->tnpc = addr+4;
6929+ return 2;
6930+ }
6931+ } while (0);
6932+
6933+ { /* PaX: patched PLT emulation #2 */
6934+ unsigned int ba;
6935+
6936+ err = get_user(ba, (unsigned int *)regs->tpc);
6937+
6938+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6939+ unsigned long addr;
6940+
6941+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6942+
6943+ if (test_thread_flag(TIF_32BIT))
6944+ addr &= 0xFFFFFFFFUL;
6945+
6946+ regs->tpc = addr;
6947+ regs->tnpc = addr+4;
6948+ return 2;
6949+ }
6950+ }
6951+
6952+ do { /* PaX: patched PLT emulation #3 */
6953+ unsigned int sethi, jmpl, nop;
6954+
6955+ err = get_user(sethi, (unsigned int *)regs->tpc);
6956+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6957+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6958+
6959+ if (err)
6960+ break;
6961+
6962+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6963+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6964+ nop == 0x01000000U)
6965+ {
6966+ unsigned long addr;
6967+
6968+ addr = (sethi & 0x003FFFFFU) << 10;
6969+ regs->u_regs[UREG_G1] = addr;
6970+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6971+
6972+ if (test_thread_flag(TIF_32BIT))
6973+ addr &= 0xFFFFFFFFUL;
6974+
6975+ regs->tpc = addr;
6976+ regs->tnpc = addr+4;
6977+ return 2;
6978+ }
6979+ } while (0);
6980+
6981+ do { /* PaX: patched PLT emulation #4 */
6982+ unsigned int sethi, mov1, call, mov2;
6983+
6984+ err = get_user(sethi, (unsigned int *)regs->tpc);
6985+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6986+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
6987+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6988+
6989+ if (err)
6990+ break;
6991+
6992+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6993+ mov1 == 0x8210000FU &&
6994+ (call & 0xC0000000U) == 0x40000000U &&
6995+ mov2 == 0x9E100001U)
6996+ {
6997+ unsigned long addr;
6998+
6999+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
7000+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7001+
7002+ if (test_thread_flag(TIF_32BIT))
7003+ addr &= 0xFFFFFFFFUL;
7004+
7005+ regs->tpc = addr;
7006+ regs->tnpc = addr+4;
7007+ return 2;
7008+ }
7009+ } while (0);
7010+
7011+ do { /* PaX: patched PLT emulation #5 */
7012+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
7013+
7014+ err = get_user(sethi, (unsigned int *)regs->tpc);
7015+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7016+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7017+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
7018+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
7019+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
7020+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
7021+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
7022+
7023+ if (err)
7024+ break;
7025+
7026+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7027+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
7028+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7029+ (or1 & 0xFFFFE000U) == 0x82106000U &&
7030+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
7031+ sllx == 0x83287020U &&
7032+ jmpl == 0x81C04005U &&
7033+ nop == 0x01000000U)
7034+ {
7035+ unsigned long addr;
7036+
7037+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7038+ regs->u_regs[UREG_G1] <<= 32;
7039+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7040+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7041+ regs->tpc = addr;
7042+ regs->tnpc = addr+4;
7043+ return 2;
7044+ }
7045+ } while (0);
7046+
7047+ do { /* PaX: patched PLT emulation #6 */
7048+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
7049+
7050+ err = get_user(sethi, (unsigned int *)regs->tpc);
7051+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7052+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7053+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
7054+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
7055+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
7056+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
7057+
7058+ if (err)
7059+ break;
7060+
7061+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7062+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
7063+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7064+ sllx == 0x83287020U &&
7065+ (or & 0xFFFFE000U) == 0x8A116000U &&
7066+ jmpl == 0x81C04005U &&
7067+ nop == 0x01000000U)
7068+ {
7069+ unsigned long addr;
7070+
7071+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
7072+ regs->u_regs[UREG_G1] <<= 32;
7073+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
7074+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7075+ regs->tpc = addr;
7076+ regs->tnpc = addr+4;
7077+ return 2;
7078+ }
7079+ } while (0);
7080+
7081+ do { /* PaX: unpatched PLT emulation step 1 */
7082+ unsigned int sethi, ba, nop;
7083+
7084+ err = get_user(sethi, (unsigned int *)regs->tpc);
7085+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7086+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7087+
7088+ if (err)
7089+ break;
7090+
7091+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7092+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7093+ nop == 0x01000000U)
7094+ {
7095+ unsigned long addr;
7096+ unsigned int save, call;
7097+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
7098+
7099+ if ((ba & 0xFFC00000U) == 0x30800000U)
7100+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7101+ else
7102+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7103+
7104+ if (test_thread_flag(TIF_32BIT))
7105+ addr &= 0xFFFFFFFFUL;
7106+
7107+ err = get_user(save, (unsigned int *)addr);
7108+ err |= get_user(call, (unsigned int *)(addr+4));
7109+ err |= get_user(nop, (unsigned int *)(addr+8));
7110+ if (err)
7111+ break;
7112+
7113+#ifdef CONFIG_PAX_DLRESOLVE
7114+ if (save == 0x9DE3BFA8U &&
7115+ (call & 0xC0000000U) == 0x40000000U &&
7116+ nop == 0x01000000U)
7117+ {
7118+ struct vm_area_struct *vma;
7119+ unsigned long call_dl_resolve;
7120+
7121+ down_read(&current->mm->mmap_sem);
7122+ call_dl_resolve = current->mm->call_dl_resolve;
7123+ up_read(&current->mm->mmap_sem);
7124+ if (likely(call_dl_resolve))
7125+ goto emulate;
7126+
7127+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7128+
7129+ down_write(&current->mm->mmap_sem);
7130+ if (current->mm->call_dl_resolve) {
7131+ call_dl_resolve = current->mm->call_dl_resolve;
7132+ up_write(&current->mm->mmap_sem);
7133+ if (vma)
7134+ kmem_cache_free(vm_area_cachep, vma);
7135+ goto emulate;
7136+ }
7137+
7138+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7139+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7140+ up_write(&current->mm->mmap_sem);
7141+ if (vma)
7142+ kmem_cache_free(vm_area_cachep, vma);
7143+ return 1;
7144+ }
7145+
7146+ if (pax_insert_vma(vma, call_dl_resolve)) {
7147+ up_write(&current->mm->mmap_sem);
7148+ kmem_cache_free(vm_area_cachep, vma);
7149+ return 1;
7150+ }
7151+
7152+ current->mm->call_dl_resolve = call_dl_resolve;
7153+ up_write(&current->mm->mmap_sem);
7154+
7155+emulate:
7156+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7157+ regs->tpc = call_dl_resolve;
7158+ regs->tnpc = addr+4;
7159+ return 3;
7160+ }
7161+#endif
7162+
7163+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7164+ if ((save & 0xFFC00000U) == 0x05000000U &&
7165+ (call & 0xFFFFE000U) == 0x85C0A000U &&
7166+ nop == 0x01000000U)
7167+ {
7168+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7169+ regs->u_regs[UREG_G2] = addr + 4;
7170+ addr = (save & 0x003FFFFFU) << 10;
7171+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7172+
7173+ if (test_thread_flag(TIF_32BIT))
7174+ addr &= 0xFFFFFFFFUL;
7175+
7176+ regs->tpc = addr;
7177+ regs->tnpc = addr+4;
7178+ return 3;
7179+ }
7180+
7181+ /* PaX: 64-bit PLT stub */
7182+ err = get_user(sethi1, (unsigned int *)addr);
7183+ err |= get_user(sethi2, (unsigned int *)(addr+4));
7184+ err |= get_user(or1, (unsigned int *)(addr+8));
7185+ err |= get_user(or2, (unsigned int *)(addr+12));
7186+ err |= get_user(sllx, (unsigned int *)(addr+16));
7187+ err |= get_user(add, (unsigned int *)(addr+20));
7188+ err |= get_user(jmpl, (unsigned int *)(addr+24));
7189+ err |= get_user(nop, (unsigned int *)(addr+28));
7190+ if (err)
7191+ break;
7192+
7193+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
7194+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7195+ (or1 & 0xFFFFE000U) == 0x88112000U &&
7196+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
7197+ sllx == 0x89293020U &&
7198+ add == 0x8A010005U &&
7199+ jmpl == 0x89C14000U &&
7200+ nop == 0x01000000U)
7201+ {
7202+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7203+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7204+ regs->u_regs[UREG_G4] <<= 32;
7205+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7206+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
7207+ regs->u_regs[UREG_G4] = addr + 24;
7208+ addr = regs->u_regs[UREG_G5];
7209+ regs->tpc = addr;
7210+ regs->tnpc = addr+4;
7211+ return 3;
7212+ }
7213+ }
7214+ } while (0);
7215+
7216+#ifdef CONFIG_PAX_DLRESOLVE
7217+ do { /* PaX: unpatched PLT emulation step 2 */
7218+ unsigned int save, call, nop;
7219+
7220+ err = get_user(save, (unsigned int *)(regs->tpc-4));
7221+ err |= get_user(call, (unsigned int *)regs->tpc);
7222+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
7223+ if (err)
7224+ break;
7225+
7226+ if (save == 0x9DE3BFA8U &&
7227+ (call & 0xC0000000U) == 0x40000000U &&
7228+ nop == 0x01000000U)
7229+ {
7230+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7231+
7232+ if (test_thread_flag(TIF_32BIT))
7233+ dl_resolve &= 0xFFFFFFFFUL;
7234+
7235+ regs->u_regs[UREG_RETPC] = regs->tpc;
7236+ regs->tpc = dl_resolve;
7237+ regs->tnpc = dl_resolve+4;
7238+ return 3;
7239+ }
7240+ } while (0);
7241+#endif
7242+
7243+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
7244+ unsigned int sethi, ba, nop;
7245+
7246+ err = get_user(sethi, (unsigned int *)regs->tpc);
7247+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7248+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7249+
7250+ if (err)
7251+ break;
7252+
7253+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7254+ (ba & 0xFFF00000U) == 0x30600000U &&
7255+ nop == 0x01000000U)
7256+ {
7257+ unsigned long addr;
7258+
7259+ addr = (sethi & 0x003FFFFFU) << 10;
7260+ regs->u_regs[UREG_G1] = addr;
7261+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7262+
7263+ if (test_thread_flag(TIF_32BIT))
7264+ addr &= 0xFFFFFFFFUL;
7265+
7266+ regs->tpc = addr;
7267+ regs->tnpc = addr+4;
7268+ return 2;
7269+ }
7270+ } while (0);
7271+
7272+#endif
7273+
7274+ return 1;
7275+}
7276+
7277+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7278+{
7279+ unsigned long i;
7280+
7281+ printk(KERN_ERR "PAX: bytes at PC: ");
7282+ for (i = 0; i < 8; i++) {
7283+ unsigned int c;
7284+ if (get_user(c, (unsigned int *)pc+i))
7285+ printk(KERN_CONT "???????? ");
7286+ else
7287+ printk(KERN_CONT "%08x ", c);
7288+ }
7289+ printk("\n");
7290+}
7291+#endif
7292+
7293 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7294 {
7295 struct mm_struct *mm = current->mm;
7296@@ -343,6 +797,29 @@ retry:
7297 if (!vma)
7298 goto bad_area;
7299
7300+#ifdef CONFIG_PAX_PAGEEXEC
7301+ /* PaX: detect ITLB misses on non-exec pages */
7302+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
7303+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
7304+ {
7305+ if (address != regs->tpc)
7306+ goto good_area;
7307+
7308+ up_read(&mm->mmap_sem);
7309+ switch (pax_handle_fetch_fault(regs)) {
7310+
7311+#ifdef CONFIG_PAX_EMUPLT
7312+ case 2:
7313+ case 3:
7314+ return;
7315+#endif
7316+
7317+ }
7318+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
7319+ do_group_exit(SIGKILL);
7320+ }
7321+#endif
7322+
7323 /* Pure DTLB misses do not tell us whether the fault causing
7324 * load/store/atomic was a write or not, it only says that there
7325 * was no match. So in such a case we (carefully) read the
7326diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
7327index 07e1453..0a7d9e9 100644
7328--- a/arch/sparc/mm/hugetlbpage.c
7329+++ b/arch/sparc/mm/hugetlbpage.c
7330@@ -67,7 +67,7 @@ full_search:
7331 }
7332 return -ENOMEM;
7333 }
7334- if (likely(!vma || addr + len <= vma->vm_start)) {
7335+ if (likely(check_heap_stack_gap(vma, addr, len))) {
7336 /*
7337 * Remember the place where we stopped the search:
7338 */
7339@@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7340 /* make sure it can fit in the remaining address space */
7341 if (likely(addr > len)) {
7342 vma = find_vma(mm, addr-len);
7343- if (!vma || addr <= vma->vm_start) {
7344+ if (check_heap_stack_gap(vma, addr - len, len)) {
7345 /* remember the address as a hint for next time */
7346 return (mm->free_area_cache = addr-len);
7347 }
7348@@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7349 if (unlikely(mm->mmap_base < len))
7350 goto bottomup;
7351
7352- addr = (mm->mmap_base-len) & HPAGE_MASK;
7353+ addr = mm->mmap_base - len;
7354
7355 do {
7356+ addr &= HPAGE_MASK;
7357 /*
7358 * Lookup failure means no vma is above this address,
7359 * else if new region fits below vma->vm_start,
7360 * return with success:
7361 */
7362 vma = find_vma(mm, addr);
7363- if (likely(!vma || addr+len <= vma->vm_start)) {
7364+ if (likely(check_heap_stack_gap(vma, addr, len))) {
7365 /* remember the address as a hint for next time */
7366 return (mm->free_area_cache = addr);
7367 }
7368@@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7369 mm->cached_hole_size = vma->vm_start - addr;
7370
7371 /* try just below the current vma->vm_start */
7372- addr = (vma->vm_start-len) & HPAGE_MASK;
7373- } while (likely(len < vma->vm_start));
7374+ addr = skip_heap_stack_gap(vma, len);
7375+ } while (!IS_ERR_VALUE(addr));
7376
7377 bottomup:
7378 /*
7379@@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7380 if (addr) {
7381 addr = ALIGN(addr, HPAGE_SIZE);
7382 vma = find_vma(mm, addr);
7383- if (task_size - len >= addr &&
7384- (!vma || addr + len <= vma->vm_start))
7385+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
7386 return addr;
7387 }
7388 if (mm->get_unmapped_area == arch_get_unmapped_area)
7389diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
7390index c5f9021..7591bae 100644
7391--- a/arch/sparc/mm/init_32.c
7392+++ b/arch/sparc/mm/init_32.c
7393@@ -315,6 +315,9 @@ extern void device_scan(void);
7394 pgprot_t PAGE_SHARED __read_mostly;
7395 EXPORT_SYMBOL(PAGE_SHARED);
7396
7397+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
7398+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
7399+
7400 void __init paging_init(void)
7401 {
7402 switch(sparc_cpu_model) {
7403@@ -343,17 +346,17 @@ void __init paging_init(void)
7404
7405 /* Initialize the protection map with non-constant, MMU dependent values. */
7406 protection_map[0] = PAGE_NONE;
7407- protection_map[1] = PAGE_READONLY;
7408- protection_map[2] = PAGE_COPY;
7409- protection_map[3] = PAGE_COPY;
7410+ protection_map[1] = PAGE_READONLY_NOEXEC;
7411+ protection_map[2] = PAGE_COPY_NOEXEC;
7412+ protection_map[3] = PAGE_COPY_NOEXEC;
7413 protection_map[4] = PAGE_READONLY;
7414 protection_map[5] = PAGE_READONLY;
7415 protection_map[6] = PAGE_COPY;
7416 protection_map[7] = PAGE_COPY;
7417 protection_map[8] = PAGE_NONE;
7418- protection_map[9] = PAGE_READONLY;
7419- protection_map[10] = PAGE_SHARED;
7420- protection_map[11] = PAGE_SHARED;
7421+ protection_map[9] = PAGE_READONLY_NOEXEC;
7422+ protection_map[10] = PAGE_SHARED_NOEXEC;
7423+ protection_map[11] = PAGE_SHARED_NOEXEC;
7424 protection_map[12] = PAGE_READONLY;
7425 protection_map[13] = PAGE_READONLY;
7426 protection_map[14] = PAGE_SHARED;
7427diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
7428index cbef74e..c38fead 100644
7429--- a/arch/sparc/mm/srmmu.c
7430+++ b/arch/sparc/mm/srmmu.c
7431@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
7432 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
7433 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
7434 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
7435+
7436+#ifdef CONFIG_PAX_PAGEEXEC
7437+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
7438+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
7439+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
7440+#endif
7441+
7442 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
7443 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
7444
7445diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
7446index f4500c6..889656c 100644
7447--- a/arch/tile/include/asm/atomic_64.h
7448+++ b/arch/tile/include/asm/atomic_64.h
7449@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7450
7451 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7452
7453+#define atomic64_read_unchecked(v) atomic64_read(v)
7454+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7455+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7456+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7457+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7458+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7459+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7460+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7461+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7462+
7463 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
7464 #define smp_mb__before_atomic_dec() smp_mb()
7465 #define smp_mb__after_atomic_dec() smp_mb()
7466diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
7467index 392e533..536b092 100644
7468--- a/arch/tile/include/asm/cache.h
7469+++ b/arch/tile/include/asm/cache.h
7470@@ -15,11 +15,12 @@
7471 #ifndef _ASM_TILE_CACHE_H
7472 #define _ASM_TILE_CACHE_H
7473
7474+#include <linux/const.h>
7475 #include <arch/chip.h>
7476
7477 /* bytes per L1 data cache line */
7478 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
7479-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7480+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7481
7482 /* bytes per L2 cache line */
7483 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
7484diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
7485index ef34d2c..d6ce60c 100644
7486--- a/arch/tile/include/asm/uaccess.h
7487+++ b/arch/tile/include/asm/uaccess.h
7488@@ -361,9 +361,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
7489 const void __user *from,
7490 unsigned long n)
7491 {
7492- int sz = __compiletime_object_size(to);
7493+ size_t sz = __compiletime_object_size(to);
7494
7495- if (likely(sz == -1 || sz >= n))
7496+ if (likely(sz == (size_t)-1 || sz >= n))
7497 n = _copy_from_user(to, from, n);
7498 else
7499 copy_from_user_overflow();
7500diff --git a/arch/um/Makefile b/arch/um/Makefile
7501index 55c0661..86ad413 100644
7502--- a/arch/um/Makefile
7503+++ b/arch/um/Makefile
7504@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
7505 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
7506 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
7507
7508+ifdef CONSTIFY_PLUGIN
7509+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7510+endif
7511+
7512 #This will adjust *FLAGS accordingly to the platform.
7513 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
7514
7515diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
7516index 19e1bdd..3665b77 100644
7517--- a/arch/um/include/asm/cache.h
7518+++ b/arch/um/include/asm/cache.h
7519@@ -1,6 +1,7 @@
7520 #ifndef __UM_CACHE_H
7521 #define __UM_CACHE_H
7522
7523+#include <linux/const.h>
7524
7525 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7526 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7527@@ -12,6 +13,6 @@
7528 # define L1_CACHE_SHIFT 5
7529 #endif
7530
7531-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7532+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7533
7534 #endif
7535diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
7536index 6c03acd..a5e0215 100644
7537--- a/arch/um/include/asm/kmap_types.h
7538+++ b/arch/um/include/asm/kmap_types.h
7539@@ -23,6 +23,7 @@ enum km_type {
7540 KM_IRQ1,
7541 KM_SOFTIRQ0,
7542 KM_SOFTIRQ1,
7543+ KM_CLEARPAGE,
7544 KM_TYPE_NR
7545 };
7546
7547diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
7548index 7cfc3ce..cbd1a58 100644
7549--- a/arch/um/include/asm/page.h
7550+++ b/arch/um/include/asm/page.h
7551@@ -14,6 +14,9 @@
7552 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
7553 #define PAGE_MASK (~(PAGE_SIZE-1))
7554
7555+#define ktla_ktva(addr) (addr)
7556+#define ktva_ktla(addr) (addr)
7557+
7558 #ifndef __ASSEMBLY__
7559
7560 struct page;
7561diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
7562index 0032f92..cd151e0 100644
7563--- a/arch/um/include/asm/pgtable-3level.h
7564+++ b/arch/um/include/asm/pgtable-3level.h
7565@@ -58,6 +58,7 @@
7566 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
7567 #define pud_populate(mm, pud, pmd) \
7568 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
7569+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7570
7571 #ifdef CONFIG_64BIT
7572 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
7573diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
7574index 2b73ded..804f540 100644
7575--- a/arch/um/kernel/process.c
7576+++ b/arch/um/kernel/process.c
7577@@ -404,22 +404,6 @@ int singlestepping(void * t)
7578 return 2;
7579 }
7580
7581-/*
7582- * Only x86 and x86_64 have an arch_align_stack().
7583- * All other arches have "#define arch_align_stack(x) (x)"
7584- * in their asm/system.h
7585- * As this is included in UML from asm-um/system-generic.h,
7586- * we can use it to behave as the subarch does.
7587- */
7588-#ifndef arch_align_stack
7589-unsigned long arch_align_stack(unsigned long sp)
7590-{
7591- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7592- sp -= get_random_int() % 8192;
7593- return sp & ~0xf;
7594-}
7595-#endif
7596-
7597 unsigned long get_wchan(struct task_struct *p)
7598 {
7599 unsigned long stack_page, sp, ip;
7600diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7601index ad8f795..2c7eec6 100644
7602--- a/arch/unicore32/include/asm/cache.h
7603+++ b/arch/unicore32/include/asm/cache.h
7604@@ -12,8 +12,10 @@
7605 #ifndef __UNICORE_CACHE_H__
7606 #define __UNICORE_CACHE_H__
7607
7608-#define L1_CACHE_SHIFT (5)
7609-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7610+#include <linux/const.h>
7611+
7612+#define L1_CACHE_SHIFT 5
7613+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7614
7615 /*
7616 * Memory returned by kmalloc() may be used for DMA, so we must make
7617diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7618index c9866b0..fe53aef 100644
7619--- a/arch/x86/Kconfig
7620+++ b/arch/x86/Kconfig
7621@@ -229,7 +229,7 @@ config X86_HT
7622
7623 config X86_32_LAZY_GS
7624 def_bool y
7625- depends on X86_32 && !CC_STACKPROTECTOR
7626+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7627
7628 config ARCH_HWEIGHT_CFLAGS
7629 string
7630@@ -1042,7 +1042,7 @@ choice
7631
7632 config NOHIGHMEM
7633 bool "off"
7634- depends on !X86_NUMAQ
7635+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7636 ---help---
7637 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7638 However, the address space of 32-bit x86 processors is only 4
7639@@ -1079,7 +1079,7 @@ config NOHIGHMEM
7640
7641 config HIGHMEM4G
7642 bool "4GB"
7643- depends on !X86_NUMAQ
7644+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7645 ---help---
7646 Select this if you have a 32-bit processor and between 1 and 4
7647 gigabytes of physical RAM.
7648@@ -1133,7 +1133,7 @@ config PAGE_OFFSET
7649 hex
7650 default 0xB0000000 if VMSPLIT_3G_OPT
7651 default 0x80000000 if VMSPLIT_2G
7652- default 0x78000000 if VMSPLIT_2G_OPT
7653+ default 0x70000000 if VMSPLIT_2G_OPT
7654 default 0x40000000 if VMSPLIT_1G
7655 default 0xC0000000
7656 depends on X86_32
7657@@ -1523,6 +1523,7 @@ config SECCOMP
7658
7659 config CC_STACKPROTECTOR
7660 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7661+ depends on X86_64 || !PAX_MEMORY_UDEREF
7662 ---help---
7663 This option turns on the -fstack-protector GCC feature. This
7664 feature puts, at the beginning of functions, a canary value on
7665@@ -1580,6 +1581,7 @@ config KEXEC_JUMP
7666 config PHYSICAL_START
7667 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7668 default "0x1000000"
7669+ range 0x400000 0x40000000
7670 ---help---
7671 This gives the physical address where the kernel is loaded.
7672
7673@@ -1643,6 +1645,7 @@ config X86_NEED_RELOCS
7674 config PHYSICAL_ALIGN
7675 hex "Alignment value to which kernel should be aligned" if X86_32
7676 default "0x1000000"
7677+ range 0x400000 0x1000000 if PAX_KERNEXEC
7678 range 0x2000 0x1000000
7679 ---help---
7680 This value puts the alignment restrictions on physical address
7681@@ -1674,9 +1677,10 @@ config HOTPLUG_CPU
7682 Say N if you want to disable CPU hotplug.
7683
7684 config COMPAT_VDSO
7685- def_bool y
7686+ def_bool n
7687 prompt "Compat VDSO support"
7688 depends on X86_32 || IA32_EMULATION
7689+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7690 ---help---
7691 Map the 32-bit VDSO to the predictable old-style address too.
7692
7693diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7694index 706e12e..62e4feb 100644
7695--- a/arch/x86/Kconfig.cpu
7696+++ b/arch/x86/Kconfig.cpu
7697@@ -334,7 +334,7 @@ config X86_PPRO_FENCE
7698
7699 config X86_F00F_BUG
7700 def_bool y
7701- depends on M586MMX || M586TSC || M586 || M486 || M386
7702+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7703
7704 config X86_INVD_BUG
7705 def_bool y
7706@@ -358,7 +358,7 @@ config X86_POPAD_OK
7707
7708 config X86_ALIGNMENT_16
7709 def_bool y
7710- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7711+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7712
7713 config X86_INTEL_USERCOPY
7714 def_bool y
7715@@ -404,7 +404,7 @@ config X86_CMPXCHG64
7716 # generates cmov.
7717 config X86_CMOV
7718 def_bool y
7719- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7720+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7721
7722 config X86_MINIMUM_CPU_FAMILY
7723 int
7724diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7725index e46c214..7c72b55 100644
7726--- a/arch/x86/Kconfig.debug
7727+++ b/arch/x86/Kconfig.debug
7728@@ -84,7 +84,7 @@ config X86_PTDUMP
7729 config DEBUG_RODATA
7730 bool "Write protect kernel read-only data structures"
7731 default y
7732- depends on DEBUG_KERNEL
7733+ depends on DEBUG_KERNEL && BROKEN
7734 ---help---
7735 Mark the kernel read-only data as write-protected in the pagetables,
7736 in order to catch accidental (and incorrect) writes to such const
7737@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
7738
7739 config DEBUG_SET_MODULE_RONX
7740 bool "Set loadable kernel module data as NX and text as RO"
7741- depends on MODULES
7742+ depends on MODULES && BROKEN
7743 ---help---
7744 This option helps catch unintended modifications to loadable
7745 kernel module's text and read-only data. It also prevents execution
7746diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7747index b1c611e..2c1a823 100644
7748--- a/arch/x86/Makefile
7749+++ b/arch/x86/Makefile
7750@@ -46,6 +46,7 @@ else
7751 UTS_MACHINE := x86_64
7752 CHECKFLAGS += -D__x86_64__ -m64
7753
7754+ biarch := $(call cc-option,-m64)
7755 KBUILD_AFLAGS += -m64
7756 KBUILD_CFLAGS += -m64
7757
7758@@ -222,3 +223,12 @@ define archhelp
7759 echo ' FDARGS="..." arguments for the booted kernel'
7760 echo ' FDINITRD=file initrd for the booted kernel'
7761 endef
7762+
7763+define OLD_LD
7764+
7765+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7766+*** Please upgrade your binutils to 2.18 or newer
7767+endef
7768+
7769+archprepare:
7770+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7771diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7772index 5a747dd..ff7b12c 100644
7773--- a/arch/x86/boot/Makefile
7774+++ b/arch/x86/boot/Makefile
7775@@ -64,6 +64,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7776 $(call cc-option, -fno-stack-protector) \
7777 $(call cc-option, -mpreferred-stack-boundary=2)
7778 KBUILD_CFLAGS += $(call cc-option, -m32)
7779+ifdef CONSTIFY_PLUGIN
7780+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7781+endif
7782 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7783 GCOV_PROFILE := n
7784
7785diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7786index 878e4b9..20537ab 100644
7787--- a/arch/x86/boot/bitops.h
7788+++ b/arch/x86/boot/bitops.h
7789@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7790 u8 v;
7791 const u32 *p = (const u32 *)addr;
7792
7793- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7794+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7795 return v;
7796 }
7797
7798@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7799
7800 static inline void set_bit(int nr, void *addr)
7801 {
7802- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7803+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7804 }
7805
7806 #endif /* BOOT_BITOPS_H */
7807diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7808index 18997e5..83d9c67 100644
7809--- a/arch/x86/boot/boot.h
7810+++ b/arch/x86/boot/boot.h
7811@@ -85,7 +85,7 @@ static inline void io_delay(void)
7812 static inline u16 ds(void)
7813 {
7814 u16 seg;
7815- asm("movw %%ds,%0" : "=rm" (seg));
7816+ asm volatile("movw %%ds,%0" : "=rm" (seg));
7817 return seg;
7818 }
7819
7820@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7821 static inline int memcmp(const void *s1, const void *s2, size_t len)
7822 {
7823 u8 diff;
7824- asm("repe; cmpsb; setnz %0"
7825+ asm volatile("repe; cmpsb; setnz %0"
7826 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7827 return diff;
7828 }
7829diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7830index e398bb5..3a382ca 100644
7831--- a/arch/x86/boot/compressed/Makefile
7832+++ b/arch/x86/boot/compressed/Makefile
7833@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7834 KBUILD_CFLAGS += $(cflags-y)
7835 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7836 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7837+ifdef CONSTIFY_PLUGIN
7838+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7839+endif
7840
7841 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7842 GCOV_PROFILE := n
7843diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
7844index 0cdfc0d..6e79437 100644
7845--- a/arch/x86/boot/compressed/eboot.c
7846+++ b/arch/x86/boot/compressed/eboot.c
7847@@ -122,7 +122,6 @@ again:
7848 *addr = max_addr;
7849 }
7850
7851-free_pool:
7852 efi_call_phys1(sys_table->boottime->free_pool, map);
7853
7854 fail:
7855@@ -186,7 +185,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
7856 if (i == map_size / desc_size)
7857 status = EFI_NOT_FOUND;
7858
7859-free_pool:
7860 efi_call_phys1(sys_table->boottime->free_pool, map);
7861 fail:
7862 return status;
7863diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7864index c85e3ac..6f5aa80 100644
7865--- a/arch/x86/boot/compressed/head_32.S
7866+++ b/arch/x86/boot/compressed/head_32.S
7867@@ -106,7 +106,7 @@ preferred_addr:
7868 notl %eax
7869 andl %eax, %ebx
7870 #else
7871- movl $LOAD_PHYSICAL_ADDR, %ebx
7872+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7873 #endif
7874
7875 /* Target address to relocate to for decompression */
7876@@ -192,7 +192,7 @@ relocated:
7877 * and where it was actually loaded.
7878 */
7879 movl %ebp, %ebx
7880- subl $LOAD_PHYSICAL_ADDR, %ebx
7881+ subl $____LOAD_PHYSICAL_ADDR, %ebx
7882 jz 2f /* Nothing to be done if loaded at compiled addr. */
7883 /*
7884 * Process relocations.
7885@@ -200,8 +200,7 @@ relocated:
7886
7887 1: subl $4, %edi
7888 movl (%edi), %ecx
7889- testl %ecx, %ecx
7890- jz 2f
7891+ jecxz 2f
7892 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7893 jmp 1b
7894 2:
7895diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7896index 87e03a1..0d94c76 100644
7897--- a/arch/x86/boot/compressed/head_64.S
7898+++ b/arch/x86/boot/compressed/head_64.S
7899@@ -91,7 +91,7 @@ ENTRY(startup_32)
7900 notl %eax
7901 andl %eax, %ebx
7902 #else
7903- movl $LOAD_PHYSICAL_ADDR, %ebx
7904+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7905 #endif
7906
7907 /* Target address to relocate to for decompression */
7908@@ -263,7 +263,7 @@ preferred_addr:
7909 notq %rax
7910 andq %rax, %rbp
7911 #else
7912- movq $LOAD_PHYSICAL_ADDR, %rbp
7913+ movq $____LOAD_PHYSICAL_ADDR, %rbp
7914 #endif
7915
7916 /* Target address to relocate to for decompression */
7917diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7918index 7116dcb..d9ae1d7 100644
7919--- a/arch/x86/boot/compressed/misc.c
7920+++ b/arch/x86/boot/compressed/misc.c
7921@@ -310,7 +310,7 @@ static void parse_elf(void *output)
7922 case PT_LOAD:
7923 #ifdef CONFIG_RELOCATABLE
7924 dest = output;
7925- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7926+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7927 #else
7928 dest = (void *)(phdr->p_paddr);
7929 #endif
7930@@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7931 error("Destination address too large");
7932 #endif
7933 #ifndef CONFIG_RELOCATABLE
7934- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7935+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7936 error("Wrong destination address");
7937 #endif
7938
7939diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7940index 4d3ff03..e4972ff 100644
7941--- a/arch/x86/boot/cpucheck.c
7942+++ b/arch/x86/boot/cpucheck.c
7943@@ -74,7 +74,7 @@ static int has_fpu(void)
7944 u16 fcw = -1, fsw = -1;
7945 u32 cr0;
7946
7947- asm("movl %%cr0,%0" : "=r" (cr0));
7948+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
7949 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7950 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7951 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7952@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7953 {
7954 u32 f0, f1;
7955
7956- asm("pushfl ; "
7957+ asm volatile("pushfl ; "
7958 "pushfl ; "
7959 "popl %0 ; "
7960 "movl %0,%1 ; "
7961@@ -115,7 +115,7 @@ static void get_flags(void)
7962 set_bit(X86_FEATURE_FPU, cpu.flags);
7963
7964 if (has_eflag(X86_EFLAGS_ID)) {
7965- asm("cpuid"
7966+ asm volatile("cpuid"
7967 : "=a" (max_intel_level),
7968 "=b" (cpu_vendor[0]),
7969 "=d" (cpu_vendor[1]),
7970@@ -124,7 +124,7 @@ static void get_flags(void)
7971
7972 if (max_intel_level >= 0x00000001 &&
7973 max_intel_level <= 0x0000ffff) {
7974- asm("cpuid"
7975+ asm volatile("cpuid"
7976 : "=a" (tfms),
7977 "=c" (cpu.flags[4]),
7978 "=d" (cpu.flags[0])
7979@@ -136,7 +136,7 @@ static void get_flags(void)
7980 cpu.model += ((tfms >> 16) & 0xf) << 4;
7981 }
7982
7983- asm("cpuid"
7984+ asm volatile("cpuid"
7985 : "=a" (max_amd_level)
7986 : "a" (0x80000000)
7987 : "ebx", "ecx", "edx");
7988@@ -144,7 +144,7 @@ static void get_flags(void)
7989 if (max_amd_level >= 0x80000001 &&
7990 max_amd_level <= 0x8000ffff) {
7991 u32 eax = 0x80000001;
7992- asm("cpuid"
7993+ asm volatile("cpuid"
7994 : "+a" (eax),
7995 "=c" (cpu.flags[6]),
7996 "=d" (cpu.flags[1])
7997@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7998 u32 ecx = MSR_K7_HWCR;
7999 u32 eax, edx;
8000
8001- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8002+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8003 eax &= ~(1 << 15);
8004- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8005+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8006
8007 get_flags(); /* Make sure it really did something */
8008 err = check_flags();
8009@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8010 u32 ecx = MSR_VIA_FCR;
8011 u32 eax, edx;
8012
8013- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8014+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8015 eax |= (1<<1)|(1<<7);
8016- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8017+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8018
8019 set_bit(X86_FEATURE_CX8, cpu.flags);
8020 err = check_flags();
8021@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8022 u32 eax, edx;
8023 u32 level = 1;
8024
8025- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8026- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8027- asm("cpuid"
8028+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8029+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8030+ asm volatile("cpuid"
8031 : "+a" (level), "=d" (cpu.flags[0])
8032 : : "ecx", "ebx");
8033- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8034+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8035
8036 err = check_flags();
8037 }
8038diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
8039index f1bbeeb..aff09cb 100644
8040--- a/arch/x86/boot/header.S
8041+++ b/arch/x86/boot/header.S
8042@@ -372,7 +372,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
8043 # single linked list of
8044 # struct setup_data
8045
8046-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
8047+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
8048
8049 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
8050 #define VO_INIT_SIZE (VO__end - VO__text)
8051diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
8052index db75d07..8e6d0af 100644
8053--- a/arch/x86/boot/memory.c
8054+++ b/arch/x86/boot/memory.c
8055@@ -19,7 +19,7 @@
8056
8057 static int detect_memory_e820(void)
8058 {
8059- int count = 0;
8060+ unsigned int count = 0;
8061 struct biosregs ireg, oreg;
8062 struct e820entry *desc = boot_params.e820_map;
8063 static struct e820entry buf; /* static so it is zeroed */
8064diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
8065index 11e8c6e..fdbb1ed 100644
8066--- a/arch/x86/boot/video-vesa.c
8067+++ b/arch/x86/boot/video-vesa.c
8068@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
8069
8070 boot_params.screen_info.vesapm_seg = oreg.es;
8071 boot_params.screen_info.vesapm_off = oreg.di;
8072+ boot_params.screen_info.vesapm_size = oreg.cx;
8073 }
8074
8075 /*
8076diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
8077index 43eda28..5ab5fdb 100644
8078--- a/arch/x86/boot/video.c
8079+++ b/arch/x86/boot/video.c
8080@@ -96,7 +96,7 @@ static void store_mode_params(void)
8081 static unsigned int get_entry(void)
8082 {
8083 char entry_buf[4];
8084- int i, len = 0;
8085+ unsigned int i, len = 0;
8086 int key;
8087 unsigned int v;
8088
8089diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
8090index 5b577d5..3c1fed4 100644
8091--- a/arch/x86/crypto/aes-x86_64-asm_64.S
8092+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
8093@@ -8,6 +8,8 @@
8094 * including this sentence is retained in full.
8095 */
8096
8097+#include <asm/alternative-asm.h>
8098+
8099 .extern crypto_ft_tab
8100 .extern crypto_it_tab
8101 .extern crypto_fl_tab
8102@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
8103 je B192; \
8104 leaq 32(r9),r9;
8105
8106+#define ret pax_force_retaddr 0, 1; ret
8107+
8108 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
8109 movq r1,r2; \
8110 movq r3,r4; \
8111diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
8112index 3470624..201259d 100644
8113--- a/arch/x86/crypto/aesni-intel_asm.S
8114+++ b/arch/x86/crypto/aesni-intel_asm.S
8115@@ -31,6 +31,7 @@
8116
8117 #include <linux/linkage.h>
8118 #include <asm/inst.h>
8119+#include <asm/alternative-asm.h>
8120
8121 #ifdef __x86_64__
8122 .data
8123@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
8124 pop %r14
8125 pop %r13
8126 pop %r12
8127+ pax_force_retaddr 0, 1
8128 ret
8129+ENDPROC(aesni_gcm_dec)
8130
8131
8132 /*****************************************************************************
8133@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
8134 pop %r14
8135 pop %r13
8136 pop %r12
8137+ pax_force_retaddr 0, 1
8138 ret
8139+ENDPROC(aesni_gcm_enc)
8140
8141 #endif
8142
8143@@ -1714,6 +1719,7 @@ _key_expansion_256a:
8144 pxor %xmm1, %xmm0
8145 movaps %xmm0, (TKEYP)
8146 add $0x10, TKEYP
8147+ pax_force_retaddr_bts
8148 ret
8149
8150 .align 4
8151@@ -1738,6 +1744,7 @@ _key_expansion_192a:
8152 shufps $0b01001110, %xmm2, %xmm1
8153 movaps %xmm1, 0x10(TKEYP)
8154 add $0x20, TKEYP
8155+ pax_force_retaddr_bts
8156 ret
8157
8158 .align 4
8159@@ -1757,6 +1764,7 @@ _key_expansion_192b:
8160
8161 movaps %xmm0, (TKEYP)
8162 add $0x10, TKEYP
8163+ pax_force_retaddr_bts
8164 ret
8165
8166 .align 4
8167@@ -1769,6 +1777,7 @@ _key_expansion_256b:
8168 pxor %xmm1, %xmm2
8169 movaps %xmm2, (TKEYP)
8170 add $0x10, TKEYP
8171+ pax_force_retaddr_bts
8172 ret
8173
8174 /*
8175@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
8176 #ifndef __x86_64__
8177 popl KEYP
8178 #endif
8179+ pax_force_retaddr 0, 1
8180 ret
8181+ENDPROC(aesni_set_key)
8182
8183 /*
8184 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
8185@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
8186 popl KLEN
8187 popl KEYP
8188 #endif
8189+ pax_force_retaddr 0, 1
8190 ret
8191+ENDPROC(aesni_enc)
8192
8193 /*
8194 * _aesni_enc1: internal ABI
8195@@ -1959,6 +1972,7 @@ _aesni_enc1:
8196 AESENC KEY STATE
8197 movaps 0x70(TKEYP), KEY
8198 AESENCLAST KEY STATE
8199+ pax_force_retaddr_bts
8200 ret
8201
8202 /*
8203@@ -2067,6 +2081,7 @@ _aesni_enc4:
8204 AESENCLAST KEY STATE2
8205 AESENCLAST KEY STATE3
8206 AESENCLAST KEY STATE4
8207+ pax_force_retaddr_bts
8208 ret
8209
8210 /*
8211@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
8212 popl KLEN
8213 popl KEYP
8214 #endif
8215+ pax_force_retaddr 0, 1
8216 ret
8217+ENDPROC(aesni_dec)
8218
8219 /*
8220 * _aesni_dec1: internal ABI
8221@@ -2146,6 +2163,7 @@ _aesni_dec1:
8222 AESDEC KEY STATE
8223 movaps 0x70(TKEYP), KEY
8224 AESDECLAST KEY STATE
8225+ pax_force_retaddr_bts
8226 ret
8227
8228 /*
8229@@ -2254,6 +2272,7 @@ _aesni_dec4:
8230 AESDECLAST KEY STATE2
8231 AESDECLAST KEY STATE3
8232 AESDECLAST KEY STATE4
8233+ pax_force_retaddr_bts
8234 ret
8235
8236 /*
8237@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
8238 popl KEYP
8239 popl LEN
8240 #endif
8241+ pax_force_retaddr 0, 1
8242 ret
8243+ENDPROC(aesni_ecb_enc)
8244
8245 /*
8246 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8247@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
8248 popl KEYP
8249 popl LEN
8250 #endif
8251+ pax_force_retaddr 0, 1
8252 ret
8253+ENDPROC(aesni_ecb_dec)
8254
8255 /*
8256 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8257@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
8258 popl LEN
8259 popl IVP
8260 #endif
8261+ pax_force_retaddr 0, 1
8262 ret
8263+ENDPROC(aesni_cbc_enc)
8264
8265 /*
8266 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8267@@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
8268 popl LEN
8269 popl IVP
8270 #endif
8271+ pax_force_retaddr 0, 1
8272 ret
8273+ENDPROC(aesni_cbc_dec)
8274
8275 #ifdef __x86_64__
8276 .align 16
8277@@ -2526,6 +2553,7 @@ _aesni_inc_init:
8278 mov $1, TCTR_LOW
8279 MOVQ_R64_XMM TCTR_LOW INC
8280 MOVQ_R64_XMM CTR TCTR_LOW
8281+ pax_force_retaddr_bts
8282 ret
8283
8284 /*
8285@@ -2554,6 +2582,7 @@ _aesni_inc:
8286 .Linc_low:
8287 movaps CTR, IV
8288 PSHUFB_XMM BSWAP_MASK IV
8289+ pax_force_retaddr_bts
8290 ret
8291
8292 /*
8293@@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
8294 .Lctr_enc_ret:
8295 movups IV, (IVP)
8296 .Lctr_enc_just_ret:
8297+ pax_force_retaddr 0, 1
8298 ret
8299+ENDPROC(aesni_ctr_enc)
8300 #endif
8301diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8302index 391d245..67f35c2 100644
8303--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
8304+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8305@@ -20,6 +20,8 @@
8306 *
8307 */
8308
8309+#include <asm/alternative-asm.h>
8310+
8311 .file "blowfish-x86_64-asm.S"
8312 .text
8313
8314@@ -151,9 +153,11 @@ __blowfish_enc_blk:
8315 jnz __enc_xor;
8316
8317 write_block();
8318+ pax_force_retaddr 0, 1
8319 ret;
8320 __enc_xor:
8321 xor_block();
8322+ pax_force_retaddr 0, 1
8323 ret;
8324
8325 .align 8
8326@@ -188,6 +192,7 @@ blowfish_dec_blk:
8327
8328 movq %r11, %rbp;
8329
8330+ pax_force_retaddr 0, 1
8331 ret;
8332
8333 /**********************************************************************
8334@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
8335
8336 popq %rbx;
8337 popq %rbp;
8338+ pax_force_retaddr 0, 1
8339 ret;
8340
8341 __enc_xor4:
8342@@ -349,6 +355,7 @@ __enc_xor4:
8343
8344 popq %rbx;
8345 popq %rbp;
8346+ pax_force_retaddr 0, 1
8347 ret;
8348
8349 .align 8
8350@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
8351 popq %rbx;
8352 popq %rbp;
8353
8354+ pax_force_retaddr 0, 1
8355 ret;
8356
8357diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
8358index 0b33743..7a56206 100644
8359--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
8360+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
8361@@ -20,6 +20,8 @@
8362 *
8363 */
8364
8365+#include <asm/alternative-asm.h>
8366+
8367 .file "camellia-x86_64-asm_64.S"
8368 .text
8369
8370@@ -229,12 +231,14 @@ __enc_done:
8371 enc_outunpack(mov, RT1);
8372
8373 movq RRBP, %rbp;
8374+ pax_force_retaddr 0, 1
8375 ret;
8376
8377 __enc_xor:
8378 enc_outunpack(xor, RT1);
8379
8380 movq RRBP, %rbp;
8381+ pax_force_retaddr 0, 1
8382 ret;
8383
8384 .global camellia_dec_blk;
8385@@ -275,6 +279,7 @@ __dec_rounds16:
8386 dec_outunpack();
8387
8388 movq RRBP, %rbp;
8389+ pax_force_retaddr 0, 1
8390 ret;
8391
8392 /**********************************************************************
8393@@ -468,6 +473,7 @@ __enc2_done:
8394
8395 movq RRBP, %rbp;
8396 popq %rbx;
8397+ pax_force_retaddr 0, 1
8398 ret;
8399
8400 __enc2_xor:
8401@@ -475,6 +481,7 @@ __enc2_xor:
8402
8403 movq RRBP, %rbp;
8404 popq %rbx;
8405+ pax_force_retaddr 0, 1
8406 ret;
8407
8408 .global camellia_dec_blk_2way;
8409@@ -517,4 +524,5 @@ __dec2_rounds16:
8410
8411 movq RRBP, %rbp;
8412 movq RXOR, %rbx;
8413+ pax_force_retaddr 0, 1
8414 ret;
8415diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8416index 6214a9b..1f4fc9a 100644
8417--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
8418+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8419@@ -1,3 +1,5 @@
8420+#include <asm/alternative-asm.h>
8421+
8422 # enter ECRYPT_encrypt_bytes
8423 .text
8424 .p2align 5
8425@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
8426 add %r11,%rsp
8427 mov %rdi,%rax
8428 mov %rsi,%rdx
8429+ pax_force_retaddr 0, 1
8430 ret
8431 # bytesatleast65:
8432 ._bytesatleast65:
8433@@ -891,6 +894,7 @@ ECRYPT_keysetup:
8434 add %r11,%rsp
8435 mov %rdi,%rax
8436 mov %rsi,%rdx
8437+ pax_force_retaddr
8438 ret
8439 # enter ECRYPT_ivsetup
8440 .text
8441@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
8442 add %r11,%rsp
8443 mov %rdi,%rax
8444 mov %rsi,%rdx
8445+ pax_force_retaddr
8446 ret
8447diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8448index 3ee1ff0..cbc568b 100644
8449--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8450+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8451@@ -24,6 +24,8 @@
8452 *
8453 */
8454
8455+#include <asm/alternative-asm.h>
8456+
8457 .file "serpent-sse2-x86_64-asm_64.S"
8458 .text
8459
8460@@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
8461 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8462 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8463
8464+ pax_force_retaddr
8465 ret;
8466
8467 __enc_xor8:
8468 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8469 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8470
8471+ pax_force_retaddr
8472 ret;
8473
8474 .align 8
8475@@ -755,4 +759,5 @@ serpent_dec_blk_8way:
8476 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
8477 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
8478
8479+ pax_force_retaddr
8480 ret;
8481diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
8482index b2c2f57..8470cab 100644
8483--- a/arch/x86/crypto/sha1_ssse3_asm.S
8484+++ b/arch/x86/crypto/sha1_ssse3_asm.S
8485@@ -28,6 +28,8 @@
8486 * (at your option) any later version.
8487 */
8488
8489+#include <asm/alternative-asm.h>
8490+
8491 #define CTX %rdi // arg1
8492 #define BUF %rsi // arg2
8493 #define CNT %rdx // arg3
8494@@ -104,6 +106,7 @@
8495 pop %r12
8496 pop %rbp
8497 pop %rbx
8498+ pax_force_retaddr 0, 1
8499 ret
8500
8501 .size \name, .-\name
8502diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8503index 5b012a2..36d5364 100644
8504--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8505+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8506@@ -20,6 +20,8 @@
8507 *
8508 */
8509
8510+#include <asm/alternative-asm.h>
8511+
8512 .file "twofish-x86_64-asm-3way.S"
8513 .text
8514
8515@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8516 popq %r13;
8517 popq %r14;
8518 popq %r15;
8519+ pax_force_retaddr 0, 1
8520 ret;
8521
8522 __enc_xor3:
8523@@ -271,6 +274,7 @@ __enc_xor3:
8524 popq %r13;
8525 popq %r14;
8526 popq %r15;
8527+ pax_force_retaddr 0, 1
8528 ret;
8529
8530 .global twofish_dec_blk_3way
8531@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8532 popq %r13;
8533 popq %r14;
8534 popq %r15;
8535+ pax_force_retaddr 0, 1
8536 ret;
8537
8538diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8539index 7bcf3fc..f53832f 100644
8540--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8541+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8542@@ -21,6 +21,7 @@
8543 .text
8544
8545 #include <asm/asm-offsets.h>
8546+#include <asm/alternative-asm.h>
8547
8548 #define a_offset 0
8549 #define b_offset 4
8550@@ -268,6 +269,7 @@ twofish_enc_blk:
8551
8552 popq R1
8553 movq $1,%rax
8554+ pax_force_retaddr 0, 1
8555 ret
8556
8557 twofish_dec_blk:
8558@@ -319,4 +321,5 @@ twofish_dec_blk:
8559
8560 popq R1
8561 movq $1,%rax
8562+ pax_force_retaddr 0, 1
8563 ret
8564diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8565index 07b3a68..bd2a388 100644
8566--- a/arch/x86/ia32/ia32_aout.c
8567+++ b/arch/x86/ia32/ia32_aout.c
8568@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8569 unsigned long dump_start, dump_size;
8570 struct user32 dump;
8571
8572+ memset(&dump, 0, sizeof(dump));
8573+
8574 fs = get_fs();
8575 set_fs(KERNEL_DS);
8576 has_dumped = 1;
8577diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8578index a69245b..6d145f4 100644
8579--- a/arch/x86/ia32/ia32_signal.c
8580+++ b/arch/x86/ia32/ia32_signal.c
8581@@ -168,7 +168,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8582 }
8583 seg = get_fs();
8584 set_fs(KERNEL_DS);
8585- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8586+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8587 set_fs(seg);
8588 if (ret >= 0 && uoss_ptr) {
8589 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8590@@ -369,7 +369,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8591 */
8592 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8593 size_t frame_size,
8594- void **fpstate)
8595+ void __user **fpstate)
8596 {
8597 unsigned long sp;
8598
8599@@ -390,7 +390,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8600
8601 if (used_math()) {
8602 sp = sp - sig_xstate_ia32_size;
8603- *fpstate = (struct _fpstate_ia32 *) sp;
8604+ *fpstate = (struct _fpstate_ia32 __user *) sp;
8605 if (save_i387_xstate_ia32(*fpstate) < 0)
8606 return (void __user *) -1L;
8607 }
8608@@ -398,7 +398,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8609 sp -= frame_size;
8610 /* Align the stack pointer according to the i386 ABI,
8611 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8612- sp = ((sp + 4) & -16ul) - 4;
8613+ sp = ((sp - 12) & -16ul) - 4;
8614 return (void __user *) sp;
8615 }
8616
8617@@ -456,7 +456,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8618 * These are actually not used anymore, but left because some
8619 * gdb versions depend on them as a marker.
8620 */
8621- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8622+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8623 } put_user_catch(err);
8624
8625 if (err)
8626@@ -498,7 +498,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8627 0xb8,
8628 __NR_ia32_rt_sigreturn,
8629 0x80cd,
8630- 0,
8631+ 0
8632 };
8633
8634 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8635@@ -528,16 +528,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8636
8637 if (ka->sa.sa_flags & SA_RESTORER)
8638 restorer = ka->sa.sa_restorer;
8639+ else if (current->mm->context.vdso)
8640+ /* Return stub is in 32bit vsyscall page */
8641+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8642 else
8643- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8644- rt_sigreturn);
8645+ restorer = &frame->retcode;
8646 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8647
8648 /*
8649 * Not actually used anymore, but left because some gdb
8650 * versions need it.
8651 */
8652- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8653+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8654 } put_user_catch(err);
8655
8656 if (err)
8657diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8658index e3e7340..05ed805 100644
8659--- a/arch/x86/ia32/ia32entry.S
8660+++ b/arch/x86/ia32/ia32entry.S
8661@@ -13,8 +13,10 @@
8662 #include <asm/thread_info.h>
8663 #include <asm/segment.h>
8664 #include <asm/irqflags.h>
8665+#include <asm/pgtable.h>
8666 #include <linux/linkage.h>
8667 #include <linux/err.h>
8668+#include <asm/alternative-asm.h>
8669
8670 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8671 #include <linux/elf-em.h>
8672@@ -94,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
8673 ENDPROC(native_irq_enable_sysexit)
8674 #endif
8675
8676+ .macro pax_enter_kernel_user
8677+ pax_set_fptr_mask
8678+#ifdef CONFIG_PAX_MEMORY_UDEREF
8679+ call pax_enter_kernel_user
8680+#endif
8681+ .endm
8682+
8683+ .macro pax_exit_kernel_user
8684+#ifdef CONFIG_PAX_MEMORY_UDEREF
8685+ call pax_exit_kernel_user
8686+#endif
8687+#ifdef CONFIG_PAX_RANDKSTACK
8688+ pushq %rax
8689+ pushq %r11
8690+ call pax_randomize_kstack
8691+ popq %r11
8692+ popq %rax
8693+#endif
8694+ .endm
8695+
8696+.macro pax_erase_kstack
8697+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8698+ call pax_erase_kstack
8699+#endif
8700+.endm
8701+
8702 /*
8703 * 32bit SYSENTER instruction entry.
8704 *
8705@@ -120,12 +148,6 @@ ENTRY(ia32_sysenter_target)
8706 CFI_REGISTER rsp,rbp
8707 SWAPGS_UNSAFE_STACK
8708 movq PER_CPU_VAR(kernel_stack), %rsp
8709- addq $(KERNEL_STACK_OFFSET),%rsp
8710- /*
8711- * No need to follow this irqs on/off section: the syscall
8712- * disabled irqs, here we enable it straight after entry:
8713- */
8714- ENABLE_INTERRUPTS(CLBR_NONE)
8715 movl %ebp,%ebp /* zero extension */
8716 pushq_cfi $__USER32_DS
8717 /*CFI_REL_OFFSET ss,0*/
8718@@ -133,24 +155,39 @@ ENTRY(ia32_sysenter_target)
8719 CFI_REL_OFFSET rsp,0
8720 pushfq_cfi
8721 /*CFI_REL_OFFSET rflags,0*/
8722- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
8723- CFI_REGISTER rip,r10
8724+ orl $X86_EFLAGS_IF,(%rsp)
8725+ GET_THREAD_INFO(%r11)
8726+ movl TI_sysenter_return(%r11), %r11d
8727+ CFI_REGISTER rip,r11
8728 pushq_cfi $__USER32_CS
8729 /*CFI_REL_OFFSET cs,0*/
8730 movl %eax, %eax
8731- pushq_cfi %r10
8732+ pushq_cfi %r11
8733 CFI_REL_OFFSET rip,0
8734 pushq_cfi %rax
8735 cld
8736 SAVE_ARGS 0,1,0
8737+ pax_enter_kernel_user
8738+ /*
8739+ * No need to follow this irqs on/off section: the syscall
8740+ * disabled irqs, here we enable it straight after entry:
8741+ */
8742+ ENABLE_INTERRUPTS(CLBR_NONE)
8743 /* no need to do an access_ok check here because rbp has been
8744 32bit zero extended */
8745+
8746+#ifdef CONFIG_PAX_MEMORY_UDEREF
8747+ mov $PAX_USER_SHADOW_BASE,%r11
8748+ add %r11,%rbp
8749+#endif
8750+
8751 1: movl (%rbp),%ebp
8752 .section __ex_table,"a"
8753 .quad 1b,ia32_badarg
8754 .previous
8755- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8756- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8757+ GET_THREAD_INFO(%r11)
8758+ orl $TS_COMPAT,TI_status(%r11)
8759+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8760 CFI_REMEMBER_STATE
8761 jnz sysenter_tracesys
8762 cmpq $(IA32_NR_syscalls-1),%rax
8763@@ -160,12 +197,15 @@ sysenter_do_call:
8764 sysenter_dispatch:
8765 call *ia32_sys_call_table(,%rax,8)
8766 movq %rax,RAX-ARGOFFSET(%rsp)
8767+ GET_THREAD_INFO(%r11)
8768 DISABLE_INTERRUPTS(CLBR_NONE)
8769 TRACE_IRQS_OFF
8770- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8771+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8772 jnz sysexit_audit
8773 sysexit_from_sys_call:
8774- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8775+ pax_exit_kernel_user
8776+ pax_erase_kstack
8777+ andl $~TS_COMPAT,TI_status(%r11)
8778 /* clear IF, that popfq doesn't enable interrupts early */
8779 andl $~0x200,EFLAGS-R11(%rsp)
8780 movl RIP-R11(%rsp),%edx /* User %eip */
8781@@ -191,6 +231,9 @@ sysexit_from_sys_call:
8782 movl %eax,%esi /* 2nd arg: syscall number */
8783 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8784 call __audit_syscall_entry
8785+
8786+ pax_erase_kstack
8787+
8788 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8789 cmpq $(IA32_NR_syscalls-1),%rax
8790 ja ia32_badsys
8791@@ -202,7 +245,7 @@ sysexit_from_sys_call:
8792 .endm
8793
8794 .macro auditsys_exit exit
8795- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8796+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8797 jnz ia32_ret_from_sys_call
8798 TRACE_IRQS_ON
8799 sti
8800@@ -213,11 +256,12 @@ sysexit_from_sys_call:
8801 1: setbe %al /* 1 if error, 0 if not */
8802 movzbl %al,%edi /* zero-extend that into %edi */
8803 call __audit_syscall_exit
8804+ GET_THREAD_INFO(%r11)
8805 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
8806 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8807 cli
8808 TRACE_IRQS_OFF
8809- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8810+ testl %edi,TI_flags(%r11)
8811 jz \exit
8812 CLEAR_RREGS -ARGOFFSET
8813 jmp int_with_check
8814@@ -235,7 +279,7 @@ sysexit_audit:
8815
8816 sysenter_tracesys:
8817 #ifdef CONFIG_AUDITSYSCALL
8818- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8819+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8820 jz sysenter_auditsys
8821 #endif
8822 SAVE_REST
8823@@ -243,6 +287,9 @@ sysenter_tracesys:
8824 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8825 movq %rsp,%rdi /* &pt_regs -> arg1 */
8826 call syscall_trace_enter
8827+
8828+ pax_erase_kstack
8829+
8830 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8831 RESTORE_REST
8832 cmpq $(IA32_NR_syscalls-1),%rax
8833@@ -274,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
8834 ENTRY(ia32_cstar_target)
8835 CFI_STARTPROC32 simple
8836 CFI_SIGNAL_FRAME
8837- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8838+ CFI_DEF_CFA rsp,0
8839 CFI_REGISTER rip,rcx
8840 /*CFI_REGISTER rflags,r11*/
8841 SWAPGS_UNSAFE_STACK
8842 movl %esp,%r8d
8843 CFI_REGISTER rsp,r8
8844 movq PER_CPU_VAR(kernel_stack),%rsp
8845+ SAVE_ARGS 8*6,0,0
8846+ pax_enter_kernel_user
8847 /*
8848 * No need to follow this irqs on/off section: the syscall
8849 * disabled irqs and here we enable it straight after entry:
8850 */
8851 ENABLE_INTERRUPTS(CLBR_NONE)
8852- SAVE_ARGS 8,0,0
8853 movl %eax,%eax /* zero extension */
8854 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8855 movq %rcx,RIP-ARGOFFSET(%rsp)
8856@@ -302,12 +350,19 @@ ENTRY(ia32_cstar_target)
8857 /* no need to do an access_ok check here because r8 has been
8858 32bit zero extended */
8859 /* hardware stack frame is complete now */
8860+
8861+#ifdef CONFIG_PAX_MEMORY_UDEREF
8862+ mov $PAX_USER_SHADOW_BASE,%r11
8863+ add %r11,%r8
8864+#endif
8865+
8866 1: movl (%r8),%r9d
8867 .section __ex_table,"a"
8868 .quad 1b,ia32_badarg
8869 .previous
8870- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8871- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8872+ GET_THREAD_INFO(%r11)
8873+ orl $TS_COMPAT,TI_status(%r11)
8874+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8875 CFI_REMEMBER_STATE
8876 jnz cstar_tracesys
8877 cmpq $IA32_NR_syscalls-1,%rax
8878@@ -317,12 +372,15 @@ cstar_do_call:
8879 cstar_dispatch:
8880 call *ia32_sys_call_table(,%rax,8)
8881 movq %rax,RAX-ARGOFFSET(%rsp)
8882+ GET_THREAD_INFO(%r11)
8883 DISABLE_INTERRUPTS(CLBR_NONE)
8884 TRACE_IRQS_OFF
8885- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8886+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8887 jnz sysretl_audit
8888 sysretl_from_sys_call:
8889- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8890+ pax_exit_kernel_user
8891+ pax_erase_kstack
8892+ andl $~TS_COMPAT,TI_status(%r11)
8893 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
8894 movl RIP-ARGOFFSET(%rsp),%ecx
8895 CFI_REGISTER rip,rcx
8896@@ -350,7 +408,7 @@ sysretl_audit:
8897
8898 cstar_tracesys:
8899 #ifdef CONFIG_AUDITSYSCALL
8900- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8901+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8902 jz cstar_auditsys
8903 #endif
8904 xchgl %r9d,%ebp
8905@@ -359,6 +417,9 @@ cstar_tracesys:
8906 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8907 movq %rsp,%rdi /* &pt_regs -> arg1 */
8908 call syscall_trace_enter
8909+
8910+ pax_erase_kstack
8911+
8912 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8913 RESTORE_REST
8914 xchgl %ebp,%r9d
8915@@ -404,19 +465,21 @@ ENTRY(ia32_syscall)
8916 CFI_REL_OFFSET rip,RIP-RIP
8917 PARAVIRT_ADJUST_EXCEPTION_FRAME
8918 SWAPGS
8919- /*
8920- * No need to follow this irqs on/off section: the syscall
8921- * disabled irqs and here we enable it straight after entry:
8922- */
8923- ENABLE_INTERRUPTS(CLBR_NONE)
8924 movl %eax,%eax
8925 pushq_cfi %rax
8926 cld
8927 /* note the registers are not zero extended to the sf.
8928 this could be a problem. */
8929 SAVE_ARGS 0,1,0
8930- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8931- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8932+ pax_enter_kernel_user
8933+ /*
8934+ * No need to follow this irqs on/off section: the syscall
8935+ * disabled irqs and here we enable it straight after entry:
8936+ */
8937+ ENABLE_INTERRUPTS(CLBR_NONE)
8938+ GET_THREAD_INFO(%r11)
8939+ orl $TS_COMPAT,TI_status(%r11)
8940+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8941 jnz ia32_tracesys
8942 cmpq $(IA32_NR_syscalls-1),%rax
8943 ja ia32_badsys
8944@@ -435,6 +498,9 @@ ia32_tracesys:
8945 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8946 movq %rsp,%rdi /* &pt_regs -> arg1 */
8947 call syscall_trace_enter
8948+
8949+ pax_erase_kstack
8950+
8951 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8952 RESTORE_REST
8953 cmpq $(IA32_NR_syscalls-1),%rax
8954diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8955index aec2202..f76174e 100644
8956--- a/arch/x86/ia32/sys_ia32.c
8957+++ b/arch/x86/ia32/sys_ia32.c
8958@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8959 */
8960 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8961 {
8962- typeof(ubuf->st_uid) uid = 0;
8963- typeof(ubuf->st_gid) gid = 0;
8964+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
8965+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
8966 SET_UID(uid, stat->uid);
8967 SET_GID(gid, stat->gid);
8968 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8969@@ -292,7 +292,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
8970 return alarm_setitimer(seconds);
8971 }
8972
8973-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
8974+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
8975 int options)
8976 {
8977 return compat_sys_wait4(pid, stat_addr, options, NULL);
8978@@ -313,7 +313,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8979 mm_segment_t old_fs = get_fs();
8980
8981 set_fs(KERNEL_DS);
8982- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8983+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8984 set_fs(old_fs);
8985 if (put_compat_timespec(&t, interval))
8986 return -EFAULT;
8987@@ -329,7 +329,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8988 mm_segment_t old_fs = get_fs();
8989
8990 set_fs(KERNEL_DS);
8991- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8992+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8993 set_fs(old_fs);
8994 if (!ret) {
8995 switch (_NSIG_WORDS) {
8996@@ -354,7 +354,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8997 if (copy_siginfo_from_user32(&info, uinfo))
8998 return -EFAULT;
8999 set_fs(KERNEL_DS);
9000- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
9001+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
9002 set_fs(old_fs);
9003 return ret;
9004 }
9005@@ -399,7 +399,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
9006 return -EFAULT;
9007
9008 set_fs(KERNEL_DS);
9009- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
9010+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
9011 count);
9012 set_fs(old_fs);
9013
9014diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
9015index 952bd01..7692c6f 100644
9016--- a/arch/x86/include/asm/alternative-asm.h
9017+++ b/arch/x86/include/asm/alternative-asm.h
9018@@ -15,6 +15,45 @@
9019 .endm
9020 #endif
9021
9022+#ifdef KERNEXEC_PLUGIN
9023+ .macro pax_force_retaddr_bts rip=0
9024+ btsq $63,\rip(%rsp)
9025+ .endm
9026+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9027+ .macro pax_force_retaddr rip=0, reload=0
9028+ btsq $63,\rip(%rsp)
9029+ .endm
9030+ .macro pax_force_fptr ptr
9031+ btsq $63,\ptr
9032+ .endm
9033+ .macro pax_set_fptr_mask
9034+ .endm
9035+#endif
9036+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
9037+ .macro pax_force_retaddr rip=0, reload=0
9038+ .if \reload
9039+ pax_set_fptr_mask
9040+ .endif
9041+ orq %r10,\rip(%rsp)
9042+ .endm
9043+ .macro pax_force_fptr ptr
9044+ orq %r10,\ptr
9045+ .endm
9046+ .macro pax_set_fptr_mask
9047+ movabs $0x8000000000000000,%r10
9048+ .endm
9049+#endif
9050+#else
9051+ .macro pax_force_retaddr rip=0, reload=0
9052+ .endm
9053+ .macro pax_force_fptr ptr
9054+ .endm
9055+ .macro pax_force_retaddr_bts rip=0
9056+ .endm
9057+ .macro pax_set_fptr_mask
9058+ .endm
9059+#endif
9060+
9061 .macro altinstruction_entry orig alt feature orig_len alt_len
9062 .long \orig - .
9063 .long \alt - .
9064diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
9065index 49331be..9706065 100644
9066--- a/arch/x86/include/asm/alternative.h
9067+++ b/arch/x86/include/asm/alternative.h
9068@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
9069 ".section .discard,\"aw\",@progbits\n" \
9070 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
9071 ".previous\n" \
9072- ".section .altinstr_replacement, \"ax\"\n" \
9073+ ".section .altinstr_replacement, \"a\"\n" \
9074 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
9075 ".previous"
9076
9077diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
9078index d854101..f6ea947 100644
9079--- a/arch/x86/include/asm/apic.h
9080+++ b/arch/x86/include/asm/apic.h
9081@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
9082
9083 #ifdef CONFIG_X86_LOCAL_APIC
9084
9085-extern unsigned int apic_verbosity;
9086+extern int apic_verbosity;
9087 extern int local_apic_timer_c2_ok;
9088
9089 extern int disable_apic;
9090diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
9091index 20370c6..a2eb9b0 100644
9092--- a/arch/x86/include/asm/apm.h
9093+++ b/arch/x86/include/asm/apm.h
9094@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
9095 __asm__ __volatile__(APM_DO_ZERO_SEGS
9096 "pushl %%edi\n\t"
9097 "pushl %%ebp\n\t"
9098- "lcall *%%cs:apm_bios_entry\n\t"
9099+ "lcall *%%ss:apm_bios_entry\n\t"
9100 "setc %%al\n\t"
9101 "popl %%ebp\n\t"
9102 "popl %%edi\n\t"
9103@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
9104 __asm__ __volatile__(APM_DO_ZERO_SEGS
9105 "pushl %%edi\n\t"
9106 "pushl %%ebp\n\t"
9107- "lcall *%%cs:apm_bios_entry\n\t"
9108+ "lcall *%%ss:apm_bios_entry\n\t"
9109 "setc %%bl\n\t"
9110 "popl %%ebp\n\t"
9111 "popl %%edi\n\t"
9112diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
9113index 58cb6d4..ca9010d 100644
9114--- a/arch/x86/include/asm/atomic.h
9115+++ b/arch/x86/include/asm/atomic.h
9116@@ -22,7 +22,18 @@
9117 */
9118 static inline int atomic_read(const atomic_t *v)
9119 {
9120- return (*(volatile int *)&(v)->counter);
9121+ return (*(volatile const int *)&(v)->counter);
9122+}
9123+
9124+/**
9125+ * atomic_read_unchecked - read atomic variable
9126+ * @v: pointer of type atomic_unchecked_t
9127+ *
9128+ * Atomically reads the value of @v.
9129+ */
9130+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9131+{
9132+ return (*(volatile const int *)&(v)->counter);
9133 }
9134
9135 /**
9136@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
9137 }
9138
9139 /**
9140+ * atomic_set_unchecked - set atomic variable
9141+ * @v: pointer of type atomic_unchecked_t
9142+ * @i: required value
9143+ *
9144+ * Atomically sets the value of @v to @i.
9145+ */
9146+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9147+{
9148+ v->counter = i;
9149+}
9150+
9151+/**
9152 * atomic_add - add integer to atomic variable
9153 * @i: integer value to add
9154 * @v: pointer of type atomic_t
9155@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
9156 */
9157 static inline void atomic_add(int i, atomic_t *v)
9158 {
9159- asm volatile(LOCK_PREFIX "addl %1,%0"
9160+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9161+
9162+#ifdef CONFIG_PAX_REFCOUNT
9163+ "jno 0f\n"
9164+ LOCK_PREFIX "subl %1,%0\n"
9165+ "int $4\n0:\n"
9166+ _ASM_EXTABLE(0b, 0b)
9167+#endif
9168+
9169+ : "+m" (v->counter)
9170+ : "ir" (i));
9171+}
9172+
9173+/**
9174+ * atomic_add_unchecked - add integer to atomic variable
9175+ * @i: integer value to add
9176+ * @v: pointer of type atomic_unchecked_t
9177+ *
9178+ * Atomically adds @i to @v.
9179+ */
9180+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9181+{
9182+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9183 : "+m" (v->counter)
9184 : "ir" (i));
9185 }
9186@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
9187 */
9188 static inline void atomic_sub(int i, atomic_t *v)
9189 {
9190- asm volatile(LOCK_PREFIX "subl %1,%0"
9191+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9192+
9193+#ifdef CONFIG_PAX_REFCOUNT
9194+ "jno 0f\n"
9195+ LOCK_PREFIX "addl %1,%0\n"
9196+ "int $4\n0:\n"
9197+ _ASM_EXTABLE(0b, 0b)
9198+#endif
9199+
9200+ : "+m" (v->counter)
9201+ : "ir" (i));
9202+}
9203+
9204+/**
9205+ * atomic_sub_unchecked - subtract integer from atomic variable
9206+ * @i: integer value to subtract
9207+ * @v: pointer of type atomic_unchecked_t
9208+ *
9209+ * Atomically subtracts @i from @v.
9210+ */
9211+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9212+{
9213+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9214 : "+m" (v->counter)
9215 : "ir" (i));
9216 }
9217@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9218 {
9219 unsigned char c;
9220
9221- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9222+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
9223+
9224+#ifdef CONFIG_PAX_REFCOUNT
9225+ "jno 0f\n"
9226+ LOCK_PREFIX "addl %2,%0\n"
9227+ "int $4\n0:\n"
9228+ _ASM_EXTABLE(0b, 0b)
9229+#endif
9230+
9231+ "sete %1\n"
9232 : "+m" (v->counter), "=qm" (c)
9233 : "ir" (i) : "memory");
9234 return c;
9235@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9236 */
9237 static inline void atomic_inc(atomic_t *v)
9238 {
9239- asm volatile(LOCK_PREFIX "incl %0"
9240+ asm volatile(LOCK_PREFIX "incl %0\n"
9241+
9242+#ifdef CONFIG_PAX_REFCOUNT
9243+ "jno 0f\n"
9244+ LOCK_PREFIX "decl %0\n"
9245+ "int $4\n0:\n"
9246+ _ASM_EXTABLE(0b, 0b)
9247+#endif
9248+
9249+ : "+m" (v->counter));
9250+}
9251+
9252+/**
9253+ * atomic_inc_unchecked - increment atomic variable
9254+ * @v: pointer of type atomic_unchecked_t
9255+ *
9256+ * Atomically increments @v by 1.
9257+ */
9258+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9259+{
9260+ asm volatile(LOCK_PREFIX "incl %0\n"
9261 : "+m" (v->counter));
9262 }
9263
9264@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
9265 */
9266 static inline void atomic_dec(atomic_t *v)
9267 {
9268- asm volatile(LOCK_PREFIX "decl %0"
9269+ asm volatile(LOCK_PREFIX "decl %0\n"
9270+
9271+#ifdef CONFIG_PAX_REFCOUNT
9272+ "jno 0f\n"
9273+ LOCK_PREFIX "incl %0\n"
9274+ "int $4\n0:\n"
9275+ _ASM_EXTABLE(0b, 0b)
9276+#endif
9277+
9278+ : "+m" (v->counter));
9279+}
9280+
9281+/**
9282+ * atomic_dec_unchecked - decrement atomic variable
9283+ * @v: pointer of type atomic_unchecked_t
9284+ *
9285+ * Atomically decrements @v by 1.
9286+ */
9287+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9288+{
9289+ asm volatile(LOCK_PREFIX "decl %0\n"
9290 : "+m" (v->counter));
9291 }
9292
9293@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9294 {
9295 unsigned char c;
9296
9297- asm volatile(LOCK_PREFIX "decl %0; sete %1"
9298+ asm volatile(LOCK_PREFIX "decl %0\n"
9299+
9300+#ifdef CONFIG_PAX_REFCOUNT
9301+ "jno 0f\n"
9302+ LOCK_PREFIX "incl %0\n"
9303+ "int $4\n0:\n"
9304+ _ASM_EXTABLE(0b, 0b)
9305+#endif
9306+
9307+ "sete %1\n"
9308 : "+m" (v->counter), "=qm" (c)
9309 : : "memory");
9310 return c != 0;
9311@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9312 {
9313 unsigned char c;
9314
9315- asm volatile(LOCK_PREFIX "incl %0; sete %1"
9316+ asm volatile(LOCK_PREFIX "incl %0\n"
9317+
9318+#ifdef CONFIG_PAX_REFCOUNT
9319+ "jno 0f\n"
9320+ LOCK_PREFIX "decl %0\n"
9321+ "int $4\n0:\n"
9322+ _ASM_EXTABLE(0b, 0b)
9323+#endif
9324+
9325+ "sete %1\n"
9326+ : "+m" (v->counter), "=qm" (c)
9327+ : : "memory");
9328+ return c != 0;
9329+}
9330+
9331+/**
9332+ * atomic_inc_and_test_unchecked - increment and test
9333+ * @v: pointer of type atomic_unchecked_t
9334+ *
9335+ * Atomically increments @v by 1
9336+ * and returns true if the result is zero, or false for all
9337+ * other cases.
9338+ */
9339+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9340+{
9341+ unsigned char c;
9342+
9343+ asm volatile(LOCK_PREFIX "incl %0\n"
9344+ "sete %1\n"
9345 : "+m" (v->counter), "=qm" (c)
9346 : : "memory");
9347 return c != 0;
9348@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9349 {
9350 unsigned char c;
9351
9352- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9353+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
9354+
9355+#ifdef CONFIG_PAX_REFCOUNT
9356+ "jno 0f\n"
9357+ LOCK_PREFIX "subl %2,%0\n"
9358+ "int $4\n0:\n"
9359+ _ASM_EXTABLE(0b, 0b)
9360+#endif
9361+
9362+ "sets %1\n"
9363 : "+m" (v->counter), "=qm" (c)
9364 : "ir" (i) : "memory");
9365 return c;
9366@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
9367 goto no_xadd;
9368 #endif
9369 /* Modern 486+ processor */
9370- return i + xadd(&v->counter, i);
9371+ return i + xadd_check_overflow(&v->counter, i);
9372
9373 #ifdef CONFIG_M386
9374 no_xadd: /* Legacy 386 processor */
9375@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
9376 }
9377
9378 /**
9379+ * atomic_add_return_unchecked - add integer and return
9380+ * @i: integer value to add
9381+ * @v: pointer of type atomic_unchecked_t
9382+ *
9383+ * Atomically adds @i to @v and returns @i + @v
9384+ */
9385+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9386+{
9387+#ifdef CONFIG_M386
9388+ int __i;
9389+ unsigned long flags;
9390+ if (unlikely(boot_cpu_data.x86 <= 3))
9391+ goto no_xadd;
9392+#endif
9393+ /* Modern 486+ processor */
9394+ return i + xadd(&v->counter, i);
9395+
9396+#ifdef CONFIG_M386
9397+no_xadd: /* Legacy 386 processor */
9398+ raw_local_irq_save(flags);
9399+ __i = atomic_read_unchecked(v);
9400+ atomic_set_unchecked(v, i + __i);
9401+ raw_local_irq_restore(flags);
9402+ return i + __i;
9403+#endif
9404+}
9405+
9406+/**
9407 * atomic_sub_return - subtract integer and return
9408 * @v: pointer of type atomic_t
9409 * @i: integer value to subtract
9410@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9411 }
9412
9413 #define atomic_inc_return(v) (atomic_add_return(1, v))
9414+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9415+{
9416+ return atomic_add_return_unchecked(1, v);
9417+}
9418 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9419
9420 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9421@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9422 return cmpxchg(&v->counter, old, new);
9423 }
9424
9425+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9426+{
9427+ return cmpxchg(&v->counter, old, new);
9428+}
9429+
9430 static inline int atomic_xchg(atomic_t *v, int new)
9431 {
9432 return xchg(&v->counter, new);
9433 }
9434
9435+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9436+{
9437+ return xchg(&v->counter, new);
9438+}
9439+
9440 /**
9441 * __atomic_add_unless - add unless the number is already a given value
9442 * @v: pointer of type atomic_t
9443@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
9444 */
9445 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9446 {
9447- int c, old;
9448+ int c, old, new;
9449 c = atomic_read(v);
9450 for (;;) {
9451- if (unlikely(c == (u)))
9452+ if (unlikely(c == u))
9453 break;
9454- old = atomic_cmpxchg((v), c, c + (a));
9455+
9456+ asm volatile("addl %2,%0\n"
9457+
9458+#ifdef CONFIG_PAX_REFCOUNT
9459+ "jno 0f\n"
9460+ "subl %2,%0\n"
9461+ "int $4\n0:\n"
9462+ _ASM_EXTABLE(0b, 0b)
9463+#endif
9464+
9465+ : "=r" (new)
9466+ : "0" (c), "ir" (a));
9467+
9468+ old = atomic_cmpxchg(v, c, new);
9469 if (likely(old == c))
9470 break;
9471 c = old;
9472@@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9473 return c;
9474 }
9475
9476+/**
9477+ * atomic_inc_not_zero_hint - increment if not null
9478+ * @v: pointer of type atomic_t
9479+ * @hint: probable value of the atomic before the increment
9480+ *
9481+ * This version of atomic_inc_not_zero() gives a hint of probable
9482+ * value of the atomic. This helps processor to not read the memory
9483+ * before doing the atomic read/modify/write cycle, lowering
9484+ * number of bus transactions on some arches.
9485+ *
9486+ * Returns: 0 if increment was not done, 1 otherwise.
9487+ */
9488+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9489+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9490+{
9491+ int val, c = hint, new;
9492+
9493+ /* sanity test, should be removed by compiler if hint is a constant */
9494+ if (!hint)
9495+ return __atomic_add_unless(v, 1, 0);
9496+
9497+ do {
9498+ asm volatile("incl %0\n"
9499+
9500+#ifdef CONFIG_PAX_REFCOUNT
9501+ "jno 0f\n"
9502+ "decl %0\n"
9503+ "int $4\n0:\n"
9504+ _ASM_EXTABLE(0b, 0b)
9505+#endif
9506+
9507+ : "=r" (new)
9508+ : "0" (c));
9509+
9510+ val = atomic_cmpxchg(v, c, new);
9511+ if (val == c)
9512+ return 1;
9513+ c = val;
9514+ } while (c);
9515+
9516+ return 0;
9517+}
9518
9519 /*
9520 * atomic_dec_if_positive - decrement by 1 if old value positive
9521diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
9522index 1981199..36b9dfb 100644
9523--- a/arch/x86/include/asm/atomic64_32.h
9524+++ b/arch/x86/include/asm/atomic64_32.h
9525@@ -12,6 +12,14 @@ typedef struct {
9526 u64 __aligned(8) counter;
9527 } atomic64_t;
9528
9529+#ifdef CONFIG_PAX_REFCOUNT
9530+typedef struct {
9531+ u64 __aligned(8) counter;
9532+} atomic64_unchecked_t;
9533+#else
9534+typedef atomic64_t atomic64_unchecked_t;
9535+#endif
9536+
9537 #define ATOMIC64_INIT(val) { (val) }
9538
9539 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
9540@@ -37,21 +45,31 @@ typedef struct {
9541 ATOMIC64_DECL_ONE(sym##_386)
9542
9543 ATOMIC64_DECL_ONE(add_386);
9544+ATOMIC64_DECL_ONE(add_unchecked_386);
9545 ATOMIC64_DECL_ONE(sub_386);
9546+ATOMIC64_DECL_ONE(sub_unchecked_386);
9547 ATOMIC64_DECL_ONE(inc_386);
9548+ATOMIC64_DECL_ONE(inc_unchecked_386);
9549 ATOMIC64_DECL_ONE(dec_386);
9550+ATOMIC64_DECL_ONE(dec_unchecked_386);
9551 #endif
9552
9553 #define alternative_atomic64(f, out, in...) \
9554 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
9555
9556 ATOMIC64_DECL(read);
9557+ATOMIC64_DECL(read_unchecked);
9558 ATOMIC64_DECL(set);
9559+ATOMIC64_DECL(set_unchecked);
9560 ATOMIC64_DECL(xchg);
9561 ATOMIC64_DECL(add_return);
9562+ATOMIC64_DECL(add_return_unchecked);
9563 ATOMIC64_DECL(sub_return);
9564+ATOMIC64_DECL(sub_return_unchecked);
9565 ATOMIC64_DECL(inc_return);
9566+ATOMIC64_DECL(inc_return_unchecked);
9567 ATOMIC64_DECL(dec_return);
9568+ATOMIC64_DECL(dec_return_unchecked);
9569 ATOMIC64_DECL(dec_if_positive);
9570 ATOMIC64_DECL(inc_not_zero);
9571 ATOMIC64_DECL(add_unless);
9572@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
9573 }
9574
9575 /**
9576+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9577+ * @p: pointer to type atomic64_unchecked_t
9578+ * @o: expected value
9579+ * @n: new value
9580+ *
9581+ * Atomically sets @v to @n if it was equal to @o and returns
9582+ * the old value.
9583+ */
9584+
9585+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9586+{
9587+ return cmpxchg64(&v->counter, o, n);
9588+}
9589+
9590+/**
9591 * atomic64_xchg - xchg atomic64 variable
9592 * @v: pointer to type atomic64_t
9593 * @n: value to assign
9594@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
9595 }
9596
9597 /**
9598+ * atomic64_set_unchecked - set atomic64 variable
9599+ * @v: pointer to type atomic64_unchecked_t
9600+ * @n: value to assign
9601+ *
9602+ * Atomically sets the value of @v to @n.
9603+ */
9604+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9605+{
9606+ unsigned high = (unsigned)(i >> 32);
9607+ unsigned low = (unsigned)i;
9608+ alternative_atomic64(set, /* no output */,
9609+ "S" (v), "b" (low), "c" (high)
9610+ : "eax", "edx", "memory");
9611+}
9612+
9613+/**
9614 * atomic64_read - read atomic64 variable
9615 * @v: pointer to type atomic64_t
9616 *
9617@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
9618 }
9619
9620 /**
9621+ * atomic64_read_unchecked - read atomic64 variable
9622+ * @v: pointer to type atomic64_unchecked_t
9623+ *
9624+ * Atomically reads the value of @v and returns it.
9625+ */
9626+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9627+{
9628+ long long r;
9629+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
9630+ return r;
9631+ }
9632+
9633+/**
9634 * atomic64_add_return - add and return
9635 * @i: integer value to add
9636 * @v: pointer to type atomic64_t
9637@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
9638 return i;
9639 }
9640
9641+/**
9642+ * atomic64_add_return_unchecked - add and return
9643+ * @i: integer value to add
9644+ * @v: pointer to type atomic64_unchecked_t
9645+ *
9646+ * Atomically adds @i to @v and returns @i + *@v
9647+ */
9648+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9649+{
9650+ alternative_atomic64(add_return_unchecked,
9651+ ASM_OUTPUT2("+A" (i), "+c" (v)),
9652+ ASM_NO_INPUT_CLOBBER("memory"));
9653+ return i;
9654+}
9655+
9656 /*
9657 * Other variants with different arithmetic operators:
9658 */
9659@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
9660 return a;
9661 }
9662
9663+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9664+{
9665+ long long a;
9666+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
9667+ "S" (v) : "memory", "ecx");
9668+ return a;
9669+}
9670+
9671 static inline long long atomic64_dec_return(atomic64_t *v)
9672 {
9673 long long a;
9674@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
9675 }
9676
9677 /**
9678+ * atomic64_add_unchecked - add integer to atomic64 variable
9679+ * @i: integer value to add
9680+ * @v: pointer to type atomic64_unchecked_t
9681+ *
9682+ * Atomically adds @i to @v.
9683+ */
9684+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9685+{
9686+ __alternative_atomic64(add_unchecked, add_return_unchecked,
9687+ ASM_OUTPUT2("+A" (i), "+c" (v)),
9688+ ASM_NO_INPUT_CLOBBER("memory"));
9689+ return i;
9690+}
9691+
9692+/**
9693 * atomic64_sub - subtract the atomic64 variable
9694 * @i: integer value to subtract
9695 * @v: pointer to type atomic64_t
9696diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
9697index 0e1cbfc..5623683 100644
9698--- a/arch/x86/include/asm/atomic64_64.h
9699+++ b/arch/x86/include/asm/atomic64_64.h
9700@@ -18,7 +18,19 @@
9701 */
9702 static inline long atomic64_read(const atomic64_t *v)
9703 {
9704- return (*(volatile long *)&(v)->counter);
9705+ return (*(volatile const long *)&(v)->counter);
9706+}
9707+
9708+/**
9709+ * atomic64_read_unchecked - read atomic64 variable
9710+ * @v: pointer of type atomic64_unchecked_t
9711+ *
9712+ * Atomically reads the value of @v.
9713+ * Doesn't imply a read memory barrier.
9714+ */
9715+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9716+{
9717+ return (*(volatile const long *)&(v)->counter);
9718 }
9719
9720 /**
9721@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9722 }
9723
9724 /**
9725+ * atomic64_set_unchecked - set atomic64 variable
9726+ * @v: pointer to type atomic64_unchecked_t
9727+ * @i: required value
9728+ *
9729+ * Atomically sets the value of @v to @i.
9730+ */
9731+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9732+{
9733+ v->counter = i;
9734+}
9735+
9736+/**
9737 * atomic64_add - add integer to atomic64 variable
9738 * @i: integer value to add
9739 * @v: pointer to type atomic64_t
9740@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9741 */
9742 static inline void atomic64_add(long i, atomic64_t *v)
9743 {
9744+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
9745+
9746+#ifdef CONFIG_PAX_REFCOUNT
9747+ "jno 0f\n"
9748+ LOCK_PREFIX "subq %1,%0\n"
9749+ "int $4\n0:\n"
9750+ _ASM_EXTABLE(0b, 0b)
9751+#endif
9752+
9753+ : "=m" (v->counter)
9754+ : "er" (i), "m" (v->counter));
9755+}
9756+
9757+/**
9758+ * atomic64_add_unchecked - add integer to atomic64 variable
9759+ * @i: integer value to add
9760+ * @v: pointer to type atomic64_unchecked_t
9761+ *
9762+ * Atomically adds @i to @v.
9763+ */
9764+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9765+{
9766 asm volatile(LOCK_PREFIX "addq %1,%0"
9767 : "=m" (v->counter)
9768 : "er" (i), "m" (v->counter));
9769@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
9770 */
9771 static inline void atomic64_sub(long i, atomic64_t *v)
9772 {
9773- asm volatile(LOCK_PREFIX "subq %1,%0"
9774+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9775+
9776+#ifdef CONFIG_PAX_REFCOUNT
9777+ "jno 0f\n"
9778+ LOCK_PREFIX "addq %1,%0\n"
9779+ "int $4\n0:\n"
9780+ _ASM_EXTABLE(0b, 0b)
9781+#endif
9782+
9783+ : "=m" (v->counter)
9784+ : "er" (i), "m" (v->counter));
9785+}
9786+
9787+/**
9788+ * atomic64_sub_unchecked - subtract the atomic64 variable
9789+ * @i: integer value to subtract
9790+ * @v: pointer to type atomic64_unchecked_t
9791+ *
9792+ * Atomically subtracts @i from @v.
9793+ */
9794+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
9795+{
9796+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9797 : "=m" (v->counter)
9798 : "er" (i), "m" (v->counter));
9799 }
9800@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9801 {
9802 unsigned char c;
9803
9804- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9805+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
9806+
9807+#ifdef CONFIG_PAX_REFCOUNT
9808+ "jno 0f\n"
9809+ LOCK_PREFIX "addq %2,%0\n"
9810+ "int $4\n0:\n"
9811+ _ASM_EXTABLE(0b, 0b)
9812+#endif
9813+
9814+ "sete %1\n"
9815 : "=m" (v->counter), "=qm" (c)
9816 : "er" (i), "m" (v->counter) : "memory");
9817 return c;
9818@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9819 */
9820 static inline void atomic64_inc(atomic64_t *v)
9821 {
9822+ asm volatile(LOCK_PREFIX "incq %0\n"
9823+
9824+#ifdef CONFIG_PAX_REFCOUNT
9825+ "jno 0f\n"
9826+ LOCK_PREFIX "decq %0\n"
9827+ "int $4\n0:\n"
9828+ _ASM_EXTABLE(0b, 0b)
9829+#endif
9830+
9831+ : "=m" (v->counter)
9832+ : "m" (v->counter));
9833+}
9834+
9835+/**
9836+ * atomic64_inc_unchecked - increment atomic64 variable
9837+ * @v: pointer to type atomic64_unchecked_t
9838+ *
9839+ * Atomically increments @v by 1.
9840+ */
9841+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9842+{
9843 asm volatile(LOCK_PREFIX "incq %0"
9844 : "=m" (v->counter)
9845 : "m" (v->counter));
9846@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
9847 */
9848 static inline void atomic64_dec(atomic64_t *v)
9849 {
9850- asm volatile(LOCK_PREFIX "decq %0"
9851+ asm volatile(LOCK_PREFIX "decq %0\n"
9852+
9853+#ifdef CONFIG_PAX_REFCOUNT
9854+ "jno 0f\n"
9855+ LOCK_PREFIX "incq %0\n"
9856+ "int $4\n0:\n"
9857+ _ASM_EXTABLE(0b, 0b)
9858+#endif
9859+
9860+ : "=m" (v->counter)
9861+ : "m" (v->counter));
9862+}
9863+
9864+/**
9865+ * atomic64_dec_unchecked - decrement atomic64 variable
9866+ * @v: pointer to type atomic64_t
9867+ *
9868+ * Atomically decrements @v by 1.
9869+ */
9870+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9871+{
9872+ asm volatile(LOCK_PREFIX "decq %0\n"
9873 : "=m" (v->counter)
9874 : "m" (v->counter));
9875 }
9876@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9877 {
9878 unsigned char c;
9879
9880- asm volatile(LOCK_PREFIX "decq %0; sete %1"
9881+ asm volatile(LOCK_PREFIX "decq %0\n"
9882+
9883+#ifdef CONFIG_PAX_REFCOUNT
9884+ "jno 0f\n"
9885+ LOCK_PREFIX "incq %0\n"
9886+ "int $4\n0:\n"
9887+ _ASM_EXTABLE(0b, 0b)
9888+#endif
9889+
9890+ "sete %1\n"
9891 : "=m" (v->counter), "=qm" (c)
9892 : "m" (v->counter) : "memory");
9893 return c != 0;
9894@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9895 {
9896 unsigned char c;
9897
9898- asm volatile(LOCK_PREFIX "incq %0; sete %1"
9899+ asm volatile(LOCK_PREFIX "incq %0\n"
9900+
9901+#ifdef CONFIG_PAX_REFCOUNT
9902+ "jno 0f\n"
9903+ LOCK_PREFIX "decq %0\n"
9904+ "int $4\n0:\n"
9905+ _ASM_EXTABLE(0b, 0b)
9906+#endif
9907+
9908+ "sete %1\n"
9909 : "=m" (v->counter), "=qm" (c)
9910 : "m" (v->counter) : "memory");
9911 return c != 0;
9912@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9913 {
9914 unsigned char c;
9915
9916- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9917+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
9918+
9919+#ifdef CONFIG_PAX_REFCOUNT
9920+ "jno 0f\n"
9921+ LOCK_PREFIX "subq %2,%0\n"
9922+ "int $4\n0:\n"
9923+ _ASM_EXTABLE(0b, 0b)
9924+#endif
9925+
9926+ "sets %1\n"
9927 : "=m" (v->counter), "=qm" (c)
9928 : "er" (i), "m" (v->counter) : "memory");
9929 return c;
9930@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9931 */
9932 static inline long atomic64_add_return(long i, atomic64_t *v)
9933 {
9934+ return i + xadd_check_overflow(&v->counter, i);
9935+}
9936+
9937+/**
9938+ * atomic64_add_return_unchecked - add and return
9939+ * @i: integer value to add
9940+ * @v: pointer to type atomic64_unchecked_t
9941+ *
9942+ * Atomically adds @i to @v and returns @i + @v
9943+ */
9944+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9945+{
9946 return i + xadd(&v->counter, i);
9947 }
9948
9949@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9950 }
9951
9952 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9953+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9954+{
9955+ return atomic64_add_return_unchecked(1, v);
9956+}
9957 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9958
9959 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9960@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9961 return cmpxchg(&v->counter, old, new);
9962 }
9963
9964+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9965+{
9966+ return cmpxchg(&v->counter, old, new);
9967+}
9968+
9969 static inline long atomic64_xchg(atomic64_t *v, long new)
9970 {
9971 return xchg(&v->counter, new);
9972@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
9973 */
9974 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9975 {
9976- long c, old;
9977+ long c, old, new;
9978 c = atomic64_read(v);
9979 for (;;) {
9980- if (unlikely(c == (u)))
9981+ if (unlikely(c == u))
9982 break;
9983- old = atomic64_cmpxchg((v), c, c + (a));
9984+
9985+ asm volatile("add %2,%0\n"
9986+
9987+#ifdef CONFIG_PAX_REFCOUNT
9988+ "jno 0f\n"
9989+ "sub %2,%0\n"
9990+ "int $4\n0:\n"
9991+ _ASM_EXTABLE(0b, 0b)
9992+#endif
9993+
9994+ : "=r" (new)
9995+ : "0" (c), "ir" (a));
9996+
9997+ old = atomic64_cmpxchg(v, c, new);
9998 if (likely(old == c))
9999 break;
10000 c = old;
10001 }
10002- return c != (u);
10003+ return c != u;
10004 }
10005
10006 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10007diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
10008index b97596e..9bd48b06 100644
10009--- a/arch/x86/include/asm/bitops.h
10010+++ b/arch/x86/include/asm/bitops.h
10011@@ -38,7 +38,7 @@
10012 * a mask operation on a byte.
10013 */
10014 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
10015-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
10016+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
10017 #define CONST_MASK(nr) (1 << ((nr) & 7))
10018
10019 /**
10020diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
10021index 5e1a2ee..c9f9533 100644
10022--- a/arch/x86/include/asm/boot.h
10023+++ b/arch/x86/include/asm/boot.h
10024@@ -11,10 +11,15 @@
10025 #include <asm/pgtable_types.h>
10026
10027 /* Physical address where kernel should be loaded. */
10028-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10029+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10030 + (CONFIG_PHYSICAL_ALIGN - 1)) \
10031 & ~(CONFIG_PHYSICAL_ALIGN - 1))
10032
10033+#ifndef __ASSEMBLY__
10034+extern unsigned char __LOAD_PHYSICAL_ADDR[];
10035+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
10036+#endif
10037+
10038 /* Minimum kernel alignment, as a power of two */
10039 #ifdef CONFIG_X86_64
10040 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
10041diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
10042index 48f99f1..d78ebf9 100644
10043--- a/arch/x86/include/asm/cache.h
10044+++ b/arch/x86/include/asm/cache.h
10045@@ -5,12 +5,13 @@
10046
10047 /* L1 cache line size */
10048 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10049-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10050+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10051
10052 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
10053+#define __read_only __attribute__((__section__(".data..read_only")))
10054
10055 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
10056-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
10057+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
10058
10059 #ifdef CONFIG_X86_VSMP
10060 #ifdef CONFIG_SMP
10061diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
10062index 9863ee3..4a1f8e1 100644
10063--- a/arch/x86/include/asm/cacheflush.h
10064+++ b/arch/x86/include/asm/cacheflush.h
10065@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
10066 unsigned long pg_flags = pg->flags & _PGMT_MASK;
10067
10068 if (pg_flags == _PGMT_DEFAULT)
10069- return -1;
10070+ return ~0UL;
10071 else if (pg_flags == _PGMT_WC)
10072 return _PAGE_CACHE_WC;
10073 else if (pg_flags == _PGMT_UC_MINUS)
10074diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
10075index 46fc474..b02b0f9 100644
10076--- a/arch/x86/include/asm/checksum_32.h
10077+++ b/arch/x86/include/asm/checksum_32.h
10078@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
10079 int len, __wsum sum,
10080 int *src_err_ptr, int *dst_err_ptr);
10081
10082+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
10083+ int len, __wsum sum,
10084+ int *src_err_ptr, int *dst_err_ptr);
10085+
10086+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
10087+ int len, __wsum sum,
10088+ int *src_err_ptr, int *dst_err_ptr);
10089+
10090 /*
10091 * Note: when you get a NULL pointer exception here this means someone
10092 * passed in an incorrect kernel address to one of these functions.
10093@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
10094 int *err_ptr)
10095 {
10096 might_sleep();
10097- return csum_partial_copy_generic((__force void *)src, dst,
10098+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
10099 len, sum, err_ptr, NULL);
10100 }
10101
10102@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
10103 {
10104 might_sleep();
10105 if (access_ok(VERIFY_WRITE, dst, len))
10106- return csum_partial_copy_generic(src, (__force void *)dst,
10107+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
10108 len, sum, NULL, err_ptr);
10109
10110 if (len)
10111diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
10112index 99480e5..d81165b 100644
10113--- a/arch/x86/include/asm/cmpxchg.h
10114+++ b/arch/x86/include/asm/cmpxchg.h
10115@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
10116 __compiletime_error("Bad argument size for cmpxchg");
10117 extern void __xadd_wrong_size(void)
10118 __compiletime_error("Bad argument size for xadd");
10119+extern void __xadd_check_overflow_wrong_size(void)
10120+ __compiletime_error("Bad argument size for xadd_check_overflow");
10121 extern void __add_wrong_size(void)
10122 __compiletime_error("Bad argument size for add");
10123+extern void __add_check_overflow_wrong_size(void)
10124+ __compiletime_error("Bad argument size for add_check_overflow");
10125
10126 /*
10127 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
10128@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
10129 __ret; \
10130 })
10131
10132+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
10133+ ({ \
10134+ __typeof__ (*(ptr)) __ret = (arg); \
10135+ switch (sizeof(*(ptr))) { \
10136+ case __X86_CASE_L: \
10137+ asm volatile (lock #op "l %0, %1\n" \
10138+ "jno 0f\n" \
10139+ "mov %0,%1\n" \
10140+ "int $4\n0:\n" \
10141+ _ASM_EXTABLE(0b, 0b) \
10142+ : "+r" (__ret), "+m" (*(ptr)) \
10143+ : : "memory", "cc"); \
10144+ break; \
10145+ case __X86_CASE_Q: \
10146+ asm volatile (lock #op "q %q0, %1\n" \
10147+ "jno 0f\n" \
10148+ "mov %0,%1\n" \
10149+ "int $4\n0:\n" \
10150+ _ASM_EXTABLE(0b, 0b) \
10151+ : "+r" (__ret), "+m" (*(ptr)) \
10152+ : : "memory", "cc"); \
10153+ break; \
10154+ default: \
10155+ __ ## op ## _check_overflow_wrong_size(); \
10156+ } \
10157+ __ret; \
10158+ })
10159+
10160 /*
10161 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
10162 * Since this is generally used to protect other memory information, we
10163@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
10164 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
10165 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
10166
10167+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
10168+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
10169+
10170 #define __add(ptr, inc, lock) \
10171 ({ \
10172 __typeof__ (*(ptr)) __ret = (inc); \
10173diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
10174index 340ee49..4238ced 100644
10175--- a/arch/x86/include/asm/cpufeature.h
10176+++ b/arch/x86/include/asm/cpufeature.h
10177@@ -371,7 +371,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
10178 ".section .discard,\"aw\",@progbits\n"
10179 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
10180 ".previous\n"
10181- ".section .altinstr_replacement,\"ax\"\n"
10182+ ".section .altinstr_replacement,\"a\"\n"
10183 "3: movb $1,%0\n"
10184 "4:\n"
10185 ".previous\n"
10186diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
10187index e95822d..a90010e 100644
10188--- a/arch/x86/include/asm/desc.h
10189+++ b/arch/x86/include/asm/desc.h
10190@@ -4,6 +4,7 @@
10191 #include <asm/desc_defs.h>
10192 #include <asm/ldt.h>
10193 #include <asm/mmu.h>
10194+#include <asm/pgtable.h>
10195
10196 #include <linux/smp.h>
10197
10198@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10199
10200 desc->type = (info->read_exec_only ^ 1) << 1;
10201 desc->type |= info->contents << 2;
10202+ desc->type |= info->seg_not_present ^ 1;
10203
10204 desc->s = 1;
10205 desc->dpl = 0x3;
10206@@ -34,19 +36,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10207 }
10208
10209 extern struct desc_ptr idt_descr;
10210-extern gate_desc idt_table[];
10211 extern struct desc_ptr nmi_idt_descr;
10212-extern gate_desc nmi_idt_table[];
10213-
10214-struct gdt_page {
10215- struct desc_struct gdt[GDT_ENTRIES];
10216-} __attribute__((aligned(PAGE_SIZE)));
10217-
10218-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
10219+extern gate_desc idt_table[256];
10220+extern gate_desc nmi_idt_table[256];
10221
10222+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
10223 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
10224 {
10225- return per_cpu(gdt_page, cpu).gdt;
10226+ return cpu_gdt_table[cpu];
10227 }
10228
10229 #ifdef CONFIG_X86_64
10230@@ -71,8 +68,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
10231 unsigned long base, unsigned dpl, unsigned flags,
10232 unsigned short seg)
10233 {
10234- gate->a = (seg << 16) | (base & 0xffff);
10235- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10236+ gate->gate.offset_low = base;
10237+ gate->gate.seg = seg;
10238+ gate->gate.reserved = 0;
10239+ gate->gate.type = type;
10240+ gate->gate.s = 0;
10241+ gate->gate.dpl = dpl;
10242+ gate->gate.p = 1;
10243+ gate->gate.offset_high = base >> 16;
10244 }
10245
10246 #endif
10247@@ -117,12 +120,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
10248
10249 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
10250 {
10251+ pax_open_kernel();
10252 memcpy(&idt[entry], gate, sizeof(*gate));
10253+ pax_close_kernel();
10254 }
10255
10256 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
10257 {
10258+ pax_open_kernel();
10259 memcpy(&ldt[entry], desc, 8);
10260+ pax_close_kernel();
10261 }
10262
10263 static inline void
10264@@ -136,7 +143,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
10265 default: size = sizeof(*gdt); break;
10266 }
10267
10268+ pax_open_kernel();
10269 memcpy(&gdt[entry], desc, size);
10270+ pax_close_kernel();
10271 }
10272
10273 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10274@@ -209,7 +218,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10275
10276 static inline void native_load_tr_desc(void)
10277 {
10278+ pax_open_kernel();
10279 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10280+ pax_close_kernel();
10281 }
10282
10283 static inline void native_load_gdt(const struct desc_ptr *dtr)
10284@@ -246,8 +257,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10285 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10286 unsigned int i;
10287
10288+ pax_open_kernel();
10289 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10290 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10291+ pax_close_kernel();
10292 }
10293
10294 #define _LDT_empty(info) \
10295@@ -310,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10296 }
10297
10298 #ifdef CONFIG_X86_64
10299-static inline void set_nmi_gate(int gate, void *addr)
10300+static inline void set_nmi_gate(int gate, const void *addr)
10301 {
10302 gate_desc s;
10303
10304@@ -319,7 +332,7 @@ static inline void set_nmi_gate(int gate, void *addr)
10305 }
10306 #endif
10307
10308-static inline void _set_gate(int gate, unsigned type, void *addr,
10309+static inline void _set_gate(int gate, unsigned type, const void *addr,
10310 unsigned dpl, unsigned ist, unsigned seg)
10311 {
10312 gate_desc s;
10313@@ -338,7 +351,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10314 * Pentium F0 0F bugfix can have resulted in the mapped
10315 * IDT being write-protected.
10316 */
10317-static inline void set_intr_gate(unsigned int n, void *addr)
10318+static inline void set_intr_gate(unsigned int n, const void *addr)
10319 {
10320 BUG_ON((unsigned)n > 0xFF);
10321 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10322@@ -368,19 +381,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10323 /*
10324 * This routine sets up an interrupt gate at directory privilege level 3.
10325 */
10326-static inline void set_system_intr_gate(unsigned int n, void *addr)
10327+static inline void set_system_intr_gate(unsigned int n, const void *addr)
10328 {
10329 BUG_ON((unsigned)n > 0xFF);
10330 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10331 }
10332
10333-static inline void set_system_trap_gate(unsigned int n, void *addr)
10334+static inline void set_system_trap_gate(unsigned int n, const void *addr)
10335 {
10336 BUG_ON((unsigned)n > 0xFF);
10337 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10338 }
10339
10340-static inline void set_trap_gate(unsigned int n, void *addr)
10341+static inline void set_trap_gate(unsigned int n, const void *addr)
10342 {
10343 BUG_ON((unsigned)n > 0xFF);
10344 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10345@@ -389,19 +402,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10346 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10347 {
10348 BUG_ON((unsigned)n > 0xFF);
10349- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10350+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10351 }
10352
10353-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10354+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10355 {
10356 BUG_ON((unsigned)n > 0xFF);
10357 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10358 }
10359
10360-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10361+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10362 {
10363 BUG_ON((unsigned)n > 0xFF);
10364 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10365 }
10366
10367+#ifdef CONFIG_X86_32
10368+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10369+{
10370+ struct desc_struct d;
10371+
10372+ if (likely(limit))
10373+ limit = (limit - 1UL) >> PAGE_SHIFT;
10374+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
10375+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10376+}
10377+#endif
10378+
10379 #endif /* _ASM_X86_DESC_H */
10380diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10381index 278441f..b95a174 100644
10382--- a/arch/x86/include/asm/desc_defs.h
10383+++ b/arch/x86/include/asm/desc_defs.h
10384@@ -31,6 +31,12 @@ struct desc_struct {
10385 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10386 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10387 };
10388+ struct {
10389+ u16 offset_low;
10390+ u16 seg;
10391+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10392+ unsigned offset_high: 16;
10393+ } gate;
10394 };
10395 } __attribute__((packed));
10396
10397diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
10398index 3778256..c5d4fce 100644
10399--- a/arch/x86/include/asm/e820.h
10400+++ b/arch/x86/include/asm/e820.h
10401@@ -69,7 +69,7 @@ struct e820map {
10402 #define ISA_START_ADDRESS 0xa0000
10403 #define ISA_END_ADDRESS 0x100000
10404
10405-#define BIOS_BEGIN 0x000a0000
10406+#define BIOS_BEGIN 0x000c0000
10407 #define BIOS_END 0x00100000
10408
10409 #define BIOS_ROM_BASE 0xffe00000
10410diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
10411index 5939f44..f8845f6 100644
10412--- a/arch/x86/include/asm/elf.h
10413+++ b/arch/x86/include/asm/elf.h
10414@@ -243,7 +243,25 @@ extern int force_personality32;
10415 the loader. We need to make sure that it is out of the way of the program
10416 that it will "exec", and that there is sufficient room for the brk. */
10417
10418+#ifdef CONFIG_PAX_SEGMEXEC
10419+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10420+#else
10421 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10422+#endif
10423+
10424+#ifdef CONFIG_PAX_ASLR
10425+#ifdef CONFIG_X86_32
10426+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10427+
10428+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10429+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10430+#else
10431+#define PAX_ELF_ET_DYN_BASE 0x400000UL
10432+
10433+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10434+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10435+#endif
10436+#endif
10437
10438 /* This yields a mask that user programs can use to figure out what
10439 instruction set this CPU supports. This could be done in user space,
10440@@ -296,16 +314,12 @@ do { \
10441
10442 #define ARCH_DLINFO \
10443 do { \
10444- if (vdso_enabled) \
10445- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10446- (unsigned long)current->mm->context.vdso); \
10447+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10448 } while (0)
10449
10450 #define ARCH_DLINFO_X32 \
10451 do { \
10452- if (vdso_enabled) \
10453- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10454- (unsigned long)current->mm->context.vdso); \
10455+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10456 } while (0)
10457
10458 #define AT_SYSINFO 32
10459@@ -320,7 +334,7 @@ else \
10460
10461 #endif /* !CONFIG_X86_32 */
10462
10463-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10464+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10465
10466 #define VDSO_ENTRY \
10467 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10468@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
10469 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10470 #define compat_arch_setup_additional_pages syscall32_setup_pages
10471
10472-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10473-#define arch_randomize_brk arch_randomize_brk
10474-
10475 /*
10476 * True on X86_32 or when emulating IA32 on X86_64
10477 */
10478diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10479index cc70c1c..d96d011 100644
10480--- a/arch/x86/include/asm/emergency-restart.h
10481+++ b/arch/x86/include/asm/emergency-restart.h
10482@@ -15,6 +15,6 @@ enum reboot_type {
10483
10484 extern enum reboot_type reboot_type;
10485
10486-extern void machine_emergency_restart(void);
10487+extern void machine_emergency_restart(void) __noreturn;
10488
10489 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10490diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
10491index 4fa8815..71b121a 100644
10492--- a/arch/x86/include/asm/fpu-internal.h
10493+++ b/arch/x86/include/asm/fpu-internal.h
10494@@ -86,6 +86,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10495 {
10496 int err;
10497
10498+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10499+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10500+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
10501+#endif
10502+
10503 /* See comment in fxsave() below. */
10504 #ifdef CONFIG_AS_FXSAVEQ
10505 asm volatile("1: fxrstorq %[fx]\n\t"
10506@@ -115,6 +120,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10507 {
10508 int err;
10509
10510+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10511+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10512+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10513+#endif
10514+
10515 /*
10516 * Clear the bytes not touched by the fxsave and reserved
10517 * for the SW usage.
10518@@ -271,7 +281,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
10519 "emms\n\t" /* clear stack tags */
10520 "fildl %P[addr]", /* set F?P to defined value */
10521 X86_FEATURE_FXSAVE_LEAK,
10522- [addr] "m" (tsk->thread.fpu.has_fpu));
10523+ [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
10524
10525 return fpu_restore_checking(&tsk->thread.fpu);
10526 }
10527diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10528index 71ecbcb..bac10b7 100644
10529--- a/arch/x86/include/asm/futex.h
10530+++ b/arch/x86/include/asm/futex.h
10531@@ -11,16 +11,18 @@
10532 #include <asm/processor.h>
10533
10534 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10535+ typecheck(u32 __user *, uaddr); \
10536 asm volatile("1:\t" insn "\n" \
10537 "2:\t.section .fixup,\"ax\"\n" \
10538 "3:\tmov\t%3, %1\n" \
10539 "\tjmp\t2b\n" \
10540 "\t.previous\n" \
10541 _ASM_EXTABLE(1b, 3b) \
10542- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10543+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10544 : "i" (-EFAULT), "0" (oparg), "1" (0))
10545
10546 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10547+ typecheck(u32 __user *, uaddr); \
10548 asm volatile("1:\tmovl %2, %0\n" \
10549 "\tmovl\t%0, %3\n" \
10550 "\t" insn "\n" \
10551@@ -33,7 +35,7 @@
10552 _ASM_EXTABLE(1b, 4b) \
10553 _ASM_EXTABLE(2b, 4b) \
10554 : "=&a" (oldval), "=&r" (ret), \
10555- "+m" (*uaddr), "=&r" (tem) \
10556+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10557 : "r" (oparg), "i" (-EFAULT), "1" (0))
10558
10559 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10560@@ -60,10 +62,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10561
10562 switch (op) {
10563 case FUTEX_OP_SET:
10564- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10565+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10566 break;
10567 case FUTEX_OP_ADD:
10568- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10569+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10570 uaddr, oparg);
10571 break;
10572 case FUTEX_OP_OR:
10573@@ -122,13 +124,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10574 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10575 return -EFAULT;
10576
10577- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10578+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
10579 "2:\t.section .fixup, \"ax\"\n"
10580 "3:\tmov %3, %0\n"
10581 "\tjmp 2b\n"
10582 "\t.previous\n"
10583 _ASM_EXTABLE(1b, 3b)
10584- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
10585+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
10586 : "i" (-EFAULT), "r" (newval), "1" (oldval)
10587 : "memory"
10588 );
10589diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10590index eb92a6e..b98b2f4 100644
10591--- a/arch/x86/include/asm/hw_irq.h
10592+++ b/arch/x86/include/asm/hw_irq.h
10593@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
10594 extern void enable_IO_APIC(void);
10595
10596 /* Statistics */
10597-extern atomic_t irq_err_count;
10598-extern atomic_t irq_mis_count;
10599+extern atomic_unchecked_t irq_err_count;
10600+extern atomic_unchecked_t irq_mis_count;
10601
10602 /* EISA */
10603 extern void eisa_set_level_irq(unsigned int irq);
10604diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10605index d8e8eef..99f81ae 100644
10606--- a/arch/x86/include/asm/io.h
10607+++ b/arch/x86/include/asm/io.h
10608@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
10609
10610 #include <linux/vmalloc.h>
10611
10612+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10613+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10614+{
10615+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10616+}
10617+
10618+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10619+{
10620+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10621+}
10622+
10623 /*
10624 * Convert a virtual cached pointer to an uncached pointer
10625 */
10626diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10627index bba3cf8..06bc8da 100644
10628--- a/arch/x86/include/asm/irqflags.h
10629+++ b/arch/x86/include/asm/irqflags.h
10630@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
10631 sti; \
10632 sysexit
10633
10634+#define GET_CR0_INTO_RDI mov %cr0, %rdi
10635+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10636+#define GET_CR3_INTO_RDI mov %cr3, %rdi
10637+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10638+
10639 #else
10640 #define INTERRUPT_RETURN iret
10641 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10642diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10643index 5478825..839e88c 100644
10644--- a/arch/x86/include/asm/kprobes.h
10645+++ b/arch/x86/include/asm/kprobes.h
10646@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10647 #define RELATIVEJUMP_SIZE 5
10648 #define RELATIVECALL_OPCODE 0xe8
10649 #define RELATIVE_ADDR_SIZE 4
10650-#define MAX_STACK_SIZE 64
10651-#define MIN_STACK_SIZE(ADDR) \
10652- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10653- THREAD_SIZE - (unsigned long)(ADDR))) \
10654- ? (MAX_STACK_SIZE) \
10655- : (((unsigned long)current_thread_info()) + \
10656- THREAD_SIZE - (unsigned long)(ADDR)))
10657+#define MAX_STACK_SIZE 64UL
10658+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10659
10660 #define flush_insn_slot(p) do { } while (0)
10661
10662diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10663index e216ba0..453f6ec 100644
10664--- a/arch/x86/include/asm/kvm_host.h
10665+++ b/arch/x86/include/asm/kvm_host.h
10666@@ -679,7 +679,7 @@ struct kvm_x86_ops {
10667 int (*check_intercept)(struct kvm_vcpu *vcpu,
10668 struct x86_instruction_info *info,
10669 enum x86_intercept_stage stage);
10670-};
10671+} __do_const;
10672
10673 struct kvm_arch_async_pf {
10674 u32 token;
10675diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10676index c8bed0d..e5721fa 100644
10677--- a/arch/x86/include/asm/local.h
10678+++ b/arch/x86/include/asm/local.h
10679@@ -17,26 +17,58 @@ typedef struct {
10680
10681 static inline void local_inc(local_t *l)
10682 {
10683- asm volatile(_ASM_INC "%0"
10684+ asm volatile(_ASM_INC "%0\n"
10685+
10686+#ifdef CONFIG_PAX_REFCOUNT
10687+ "jno 0f\n"
10688+ _ASM_DEC "%0\n"
10689+ "int $4\n0:\n"
10690+ _ASM_EXTABLE(0b, 0b)
10691+#endif
10692+
10693 : "+m" (l->a.counter));
10694 }
10695
10696 static inline void local_dec(local_t *l)
10697 {
10698- asm volatile(_ASM_DEC "%0"
10699+ asm volatile(_ASM_DEC "%0\n"
10700+
10701+#ifdef CONFIG_PAX_REFCOUNT
10702+ "jno 0f\n"
10703+ _ASM_INC "%0\n"
10704+ "int $4\n0:\n"
10705+ _ASM_EXTABLE(0b, 0b)
10706+#endif
10707+
10708 : "+m" (l->a.counter));
10709 }
10710
10711 static inline void local_add(long i, local_t *l)
10712 {
10713- asm volatile(_ASM_ADD "%1,%0"
10714+ asm volatile(_ASM_ADD "%1,%0\n"
10715+
10716+#ifdef CONFIG_PAX_REFCOUNT
10717+ "jno 0f\n"
10718+ _ASM_SUB "%1,%0\n"
10719+ "int $4\n0:\n"
10720+ _ASM_EXTABLE(0b, 0b)
10721+#endif
10722+
10723 : "+m" (l->a.counter)
10724 : "ir" (i));
10725 }
10726
10727 static inline void local_sub(long i, local_t *l)
10728 {
10729- asm volatile(_ASM_SUB "%1,%0"
10730+ asm volatile(_ASM_SUB "%1,%0\n"
10731+
10732+#ifdef CONFIG_PAX_REFCOUNT
10733+ "jno 0f\n"
10734+ _ASM_ADD "%1,%0\n"
10735+ "int $4\n0:\n"
10736+ _ASM_EXTABLE(0b, 0b)
10737+#endif
10738+
10739 : "+m" (l->a.counter)
10740 : "ir" (i));
10741 }
10742@@ -54,7 +86,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10743 {
10744 unsigned char c;
10745
10746- asm volatile(_ASM_SUB "%2,%0; sete %1"
10747+ asm volatile(_ASM_SUB "%2,%0\n"
10748+
10749+#ifdef CONFIG_PAX_REFCOUNT
10750+ "jno 0f\n"
10751+ _ASM_ADD "%2,%0\n"
10752+ "int $4\n0:\n"
10753+ _ASM_EXTABLE(0b, 0b)
10754+#endif
10755+
10756+ "sete %1\n"
10757 : "+m" (l->a.counter), "=qm" (c)
10758 : "ir" (i) : "memory");
10759 return c;
10760@@ -72,7 +113,16 @@ static inline int local_dec_and_test(local_t *l)
10761 {
10762 unsigned char c;
10763
10764- asm volatile(_ASM_DEC "%0; sete %1"
10765+ asm volatile(_ASM_DEC "%0\n"
10766+
10767+#ifdef CONFIG_PAX_REFCOUNT
10768+ "jno 0f\n"
10769+ _ASM_INC "%0\n"
10770+ "int $4\n0:\n"
10771+ _ASM_EXTABLE(0b, 0b)
10772+#endif
10773+
10774+ "sete %1\n"
10775 : "+m" (l->a.counter), "=qm" (c)
10776 : : "memory");
10777 return c != 0;
10778@@ -90,7 +140,16 @@ static inline int local_inc_and_test(local_t *l)
10779 {
10780 unsigned char c;
10781
10782- asm volatile(_ASM_INC "%0; sete %1"
10783+ asm volatile(_ASM_INC "%0\n"
10784+
10785+#ifdef CONFIG_PAX_REFCOUNT
10786+ "jno 0f\n"
10787+ _ASM_DEC "%0\n"
10788+ "int $4\n0:\n"
10789+ _ASM_EXTABLE(0b, 0b)
10790+#endif
10791+
10792+ "sete %1\n"
10793 : "+m" (l->a.counter), "=qm" (c)
10794 : : "memory");
10795 return c != 0;
10796@@ -109,7 +168,16 @@ static inline int local_add_negative(long i, local_t *l)
10797 {
10798 unsigned char c;
10799
10800- asm volatile(_ASM_ADD "%2,%0; sets %1"
10801+ asm volatile(_ASM_ADD "%2,%0\n"
10802+
10803+#ifdef CONFIG_PAX_REFCOUNT
10804+ "jno 0f\n"
10805+ _ASM_SUB "%2,%0\n"
10806+ "int $4\n0:\n"
10807+ _ASM_EXTABLE(0b, 0b)
10808+#endif
10809+
10810+ "sets %1\n"
10811 : "+m" (l->a.counter), "=qm" (c)
10812 : "ir" (i) : "memory");
10813 return c;
10814@@ -132,7 +200,15 @@ static inline long local_add_return(long i, local_t *l)
10815 #endif
10816 /* Modern 486+ processor */
10817 __i = i;
10818- asm volatile(_ASM_XADD "%0, %1;"
10819+ asm volatile(_ASM_XADD "%0, %1\n"
10820+
10821+#ifdef CONFIG_PAX_REFCOUNT
10822+ "jno 0f\n"
10823+ _ASM_MOV "%0,%1\n"
10824+ "int $4\n0:\n"
10825+ _ASM_EXTABLE(0b, 0b)
10826+#endif
10827+
10828 : "+r" (i), "+m" (l->a.counter)
10829 : : "memory");
10830 return i + __i;
10831diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10832index 593e51d..fa69c9a 100644
10833--- a/arch/x86/include/asm/mman.h
10834+++ b/arch/x86/include/asm/mman.h
10835@@ -5,4 +5,14 @@
10836
10837 #include <asm-generic/mman.h>
10838
10839+#ifdef __KERNEL__
10840+#ifndef __ASSEMBLY__
10841+#ifdef CONFIG_X86_32
10842+#define arch_mmap_check i386_mmap_check
10843+int i386_mmap_check(unsigned long addr, unsigned long len,
10844+ unsigned long flags);
10845+#endif
10846+#endif
10847+#endif
10848+
10849 #endif /* _ASM_X86_MMAN_H */
10850diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10851index 5f55e69..e20bfb1 100644
10852--- a/arch/x86/include/asm/mmu.h
10853+++ b/arch/x86/include/asm/mmu.h
10854@@ -9,7 +9,7 @@
10855 * we put the segment information here.
10856 */
10857 typedef struct {
10858- void *ldt;
10859+ struct desc_struct *ldt;
10860 int size;
10861
10862 #ifdef CONFIG_X86_64
10863@@ -18,7 +18,19 @@ typedef struct {
10864 #endif
10865
10866 struct mutex lock;
10867- void *vdso;
10868+ unsigned long vdso;
10869+
10870+#ifdef CONFIG_X86_32
10871+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10872+ unsigned long user_cs_base;
10873+ unsigned long user_cs_limit;
10874+
10875+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10876+ cpumask_t cpu_user_cs_mask;
10877+#endif
10878+
10879+#endif
10880+#endif
10881 } mm_context_t;
10882
10883 #ifdef CONFIG_SMP
10884diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10885index 6902152..da4283a 100644
10886--- a/arch/x86/include/asm/mmu_context.h
10887+++ b/arch/x86/include/asm/mmu_context.h
10888@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10889
10890 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10891 {
10892+
10893+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10894+ unsigned int i;
10895+ pgd_t *pgd;
10896+
10897+ pax_open_kernel();
10898+ pgd = get_cpu_pgd(smp_processor_id());
10899+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10900+ set_pgd_batched(pgd+i, native_make_pgd(0));
10901+ pax_close_kernel();
10902+#endif
10903+
10904 #ifdef CONFIG_SMP
10905 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10906 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10907@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10908 struct task_struct *tsk)
10909 {
10910 unsigned cpu = smp_processor_id();
10911+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10912+ int tlbstate = TLBSTATE_OK;
10913+#endif
10914
10915 if (likely(prev != next)) {
10916 #ifdef CONFIG_SMP
10917+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10918+ tlbstate = percpu_read(cpu_tlbstate.state);
10919+#endif
10920 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10921 percpu_write(cpu_tlbstate.active_mm, next);
10922 #endif
10923 cpumask_set_cpu(cpu, mm_cpumask(next));
10924
10925 /* Re-load page tables */
10926+#ifdef CONFIG_PAX_PER_CPU_PGD
10927+ pax_open_kernel();
10928+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
10929+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
10930+ pax_close_kernel();
10931+ load_cr3(get_cpu_pgd(cpu));
10932+#else
10933 load_cr3(next->pgd);
10934+#endif
10935
10936 /* stop flush ipis for the previous mm */
10937 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10938@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10939 */
10940 if (unlikely(prev->context.ldt != next->context.ldt))
10941 load_LDT_nolock(&next->context);
10942- }
10943+
10944+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10945+ if (!(__supported_pte_mask & _PAGE_NX)) {
10946+ smp_mb__before_clear_bit();
10947+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10948+ smp_mb__after_clear_bit();
10949+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10950+ }
10951+#endif
10952+
10953+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10954+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10955+ prev->context.user_cs_limit != next->context.user_cs_limit))
10956+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10957 #ifdef CONFIG_SMP
10958+ else if (unlikely(tlbstate != TLBSTATE_OK))
10959+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10960+#endif
10961+#endif
10962+
10963+ }
10964 else {
10965+
10966+#ifdef CONFIG_PAX_PER_CPU_PGD
10967+ pax_open_kernel();
10968+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
10969+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
10970+ pax_close_kernel();
10971+ load_cr3(get_cpu_pgd(cpu));
10972+#endif
10973+
10974+#ifdef CONFIG_SMP
10975 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10976 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10977
10978@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10979 * tlb flush IPI delivery. We must reload CR3
10980 * to make sure to use no freed page tables.
10981 */
10982+
10983+#ifndef CONFIG_PAX_PER_CPU_PGD
10984 load_cr3(next->pgd);
10985+#endif
10986+
10987 load_LDT_nolock(&next->context);
10988+
10989+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10990+ if (!(__supported_pte_mask & _PAGE_NX))
10991+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10992+#endif
10993+
10994+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10995+#ifdef CONFIG_PAX_PAGEEXEC
10996+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
10997+#endif
10998+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10999+#endif
11000+
11001 }
11002+#endif
11003 }
11004-#endif
11005 }
11006
11007 #define activate_mm(prev, next) \
11008diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
11009index 9eae775..c914fea 100644
11010--- a/arch/x86/include/asm/module.h
11011+++ b/arch/x86/include/asm/module.h
11012@@ -5,6 +5,7 @@
11013
11014 #ifdef CONFIG_X86_64
11015 /* X86_64 does not define MODULE_PROC_FAMILY */
11016+#define MODULE_PROC_FAMILY ""
11017 #elif defined CONFIG_M386
11018 #define MODULE_PROC_FAMILY "386 "
11019 #elif defined CONFIG_M486
11020@@ -59,8 +60,20 @@
11021 #error unknown processor family
11022 #endif
11023
11024-#ifdef CONFIG_X86_32
11025-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
11026+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11027+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
11028+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
11029+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
11030+#else
11031+#define MODULE_PAX_KERNEXEC ""
11032 #endif
11033
11034+#ifdef CONFIG_PAX_MEMORY_UDEREF
11035+#define MODULE_PAX_UDEREF "UDEREF "
11036+#else
11037+#define MODULE_PAX_UDEREF ""
11038+#endif
11039+
11040+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
11041+
11042 #endif /* _ASM_X86_MODULE_H */
11043diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
11044index 7639dbf..e08a58c 100644
11045--- a/arch/x86/include/asm/page_64_types.h
11046+++ b/arch/x86/include/asm/page_64_types.h
11047@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
11048
11049 /* duplicated to the one in bootmem.h */
11050 extern unsigned long max_pfn;
11051-extern unsigned long phys_base;
11052+extern const unsigned long phys_base;
11053
11054 extern unsigned long __phys_addr(unsigned long);
11055 #define __phys_reloc_hide(x) (x)
11056diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
11057index aa0f913..0c5bc6a 100644
11058--- a/arch/x86/include/asm/paravirt.h
11059+++ b/arch/x86/include/asm/paravirt.h
11060@@ -668,6 +668,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
11061 val);
11062 }
11063
11064+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11065+{
11066+ pgdval_t val = native_pgd_val(pgd);
11067+
11068+ if (sizeof(pgdval_t) > sizeof(long))
11069+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
11070+ val, (u64)val >> 32);
11071+ else
11072+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
11073+ val);
11074+}
11075+
11076 static inline void pgd_clear(pgd_t *pgdp)
11077 {
11078 set_pgd(pgdp, __pgd(0));
11079@@ -749,6 +761,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
11080 pv_mmu_ops.set_fixmap(idx, phys, flags);
11081 }
11082
11083+#ifdef CONFIG_PAX_KERNEXEC
11084+static inline unsigned long pax_open_kernel(void)
11085+{
11086+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
11087+}
11088+
11089+static inline unsigned long pax_close_kernel(void)
11090+{
11091+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
11092+}
11093+#else
11094+static inline unsigned long pax_open_kernel(void) { return 0; }
11095+static inline unsigned long pax_close_kernel(void) { return 0; }
11096+#endif
11097+
11098 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
11099
11100 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
11101@@ -965,7 +992,7 @@ extern void default_banner(void);
11102
11103 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11104 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11105-#define PARA_INDIRECT(addr) *%cs:addr
11106+#define PARA_INDIRECT(addr) *%ss:addr
11107 #endif
11108
11109 #define INTERRUPT_RETURN \
11110@@ -1042,6 +1069,21 @@ extern void default_banner(void);
11111 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11112 CLBR_NONE, \
11113 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11114+
11115+#define GET_CR0_INTO_RDI \
11116+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11117+ mov %rax,%rdi
11118+
11119+#define SET_RDI_INTO_CR0 \
11120+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11121+
11122+#define GET_CR3_INTO_RDI \
11123+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11124+ mov %rax,%rdi
11125+
11126+#define SET_RDI_INTO_CR3 \
11127+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11128+
11129 #endif /* CONFIG_X86_32 */
11130
11131 #endif /* __ASSEMBLY__ */
11132diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11133index 8e8b9a4..f07d725 100644
11134--- a/arch/x86/include/asm/paravirt_types.h
11135+++ b/arch/x86/include/asm/paravirt_types.h
11136@@ -84,20 +84,20 @@ struct pv_init_ops {
11137 */
11138 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11139 unsigned long addr, unsigned len);
11140-};
11141+} __no_const;
11142
11143
11144 struct pv_lazy_ops {
11145 /* Set deferred update mode, used for batching operations. */
11146 void (*enter)(void);
11147 void (*leave)(void);
11148-};
11149+} __no_const;
11150
11151 struct pv_time_ops {
11152 unsigned long long (*sched_clock)(void);
11153 unsigned long long (*steal_clock)(int cpu);
11154 unsigned long (*get_tsc_khz)(void);
11155-};
11156+} __no_const;
11157
11158 struct pv_cpu_ops {
11159 /* hooks for various privileged instructions */
11160@@ -193,7 +193,7 @@ struct pv_cpu_ops {
11161
11162 void (*start_context_switch)(struct task_struct *prev);
11163 void (*end_context_switch)(struct task_struct *next);
11164-};
11165+} __no_const;
11166
11167 struct pv_irq_ops {
11168 /*
11169@@ -224,7 +224,7 @@ struct pv_apic_ops {
11170 unsigned long start_eip,
11171 unsigned long start_esp);
11172 #endif
11173-};
11174+} __no_const;
11175
11176 struct pv_mmu_ops {
11177 unsigned long (*read_cr2)(void);
11178@@ -313,6 +313,7 @@ struct pv_mmu_ops {
11179 struct paravirt_callee_save make_pud;
11180
11181 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11182+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11183 #endif /* PAGETABLE_LEVELS == 4 */
11184 #endif /* PAGETABLE_LEVELS >= 3 */
11185
11186@@ -324,6 +325,12 @@ struct pv_mmu_ops {
11187 an mfn. We can tell which is which from the index. */
11188 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11189 phys_addr_t phys, pgprot_t flags);
11190+
11191+#ifdef CONFIG_PAX_KERNEXEC
11192+ unsigned long (*pax_open_kernel)(void);
11193+ unsigned long (*pax_close_kernel)(void);
11194+#endif
11195+
11196 };
11197
11198 struct arch_spinlock;
11199@@ -334,7 +341,7 @@ struct pv_lock_ops {
11200 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
11201 int (*spin_trylock)(struct arch_spinlock *lock);
11202 void (*spin_unlock)(struct arch_spinlock *lock);
11203-};
11204+} __no_const;
11205
11206 /* This contains all the paravirt structures: we get a convenient
11207 * number for each function using the offset which we use to indicate
11208diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
11209index b4389a4..7024269 100644
11210--- a/arch/x86/include/asm/pgalloc.h
11211+++ b/arch/x86/include/asm/pgalloc.h
11212@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
11213 pmd_t *pmd, pte_t *pte)
11214 {
11215 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11216+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
11217+}
11218+
11219+static inline void pmd_populate_user(struct mm_struct *mm,
11220+ pmd_t *pmd, pte_t *pte)
11221+{
11222+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11223 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
11224 }
11225
11226@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
11227
11228 #ifdef CONFIG_X86_PAE
11229 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
11230+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
11231+{
11232+ pud_populate(mm, pudp, pmd);
11233+}
11234 #else /* !CONFIG_X86_PAE */
11235 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11236 {
11237 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11238 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
11239 }
11240+
11241+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11242+{
11243+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11244+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
11245+}
11246 #endif /* CONFIG_X86_PAE */
11247
11248 #if PAGETABLE_LEVELS > 3
11249@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11250 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
11251 }
11252
11253+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11254+{
11255+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
11256+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
11257+}
11258+
11259 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
11260 {
11261 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
11262diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11263index 98391db..8f6984e 100644
11264--- a/arch/x86/include/asm/pgtable-2level.h
11265+++ b/arch/x86/include/asm/pgtable-2level.h
11266@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
11267
11268 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11269 {
11270+ pax_open_kernel();
11271 *pmdp = pmd;
11272+ pax_close_kernel();
11273 }
11274
11275 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11276diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
11277index effff47..bbb8295 100644
11278--- a/arch/x86/include/asm/pgtable-3level.h
11279+++ b/arch/x86/include/asm/pgtable-3level.h
11280@@ -31,6 +31,56 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
11281 ptep->pte_low = pte.pte_low;
11282 }
11283
11284+#define __HAVE_ARCH_READ_PMD_ATOMIC
11285+/*
11286+ * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
11287+ * a "*pmdp" dereference done by gcc. Problem is, in certain places
11288+ * where pte_offset_map_lock is called, concurrent page faults are
11289+ * allowed, if the mmap_sem is hold for reading. An example is mincore
11290+ * vs page faults vs MADV_DONTNEED. On the page fault side
11291+ * pmd_populate rightfully does a set_64bit, but if we're reading the
11292+ * pmd_t with a "*pmdp" on the mincore side, a SMP race can happen
11293+ * because gcc will not read the 64bit of the pmd atomically. To fix
11294+ * this all places running pmd_offset_map_lock() while holding the
11295+ * mmap_sem in read mode, shall read the pmdp pointer using this
11296+ * function to know if the pmd is null nor not, and in turn to know if
11297+ * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
11298+ * operations.
11299+ *
11300+ * Without THP if the mmap_sem is hold for reading, the
11301+ * pmd can only transition from null to not null while read_pmd_atomic runs.
11302+ * So there's no need of literally reading it atomically.
11303+ *
11304+ * With THP if the mmap_sem is hold for reading, the pmd can become
11305+ * THP or null or point to a pte (and in turn become "stable") at any
11306+ * time under read_pmd_atomic, so it's mandatory to read it atomically
11307+ * with cmpxchg8b.
11308+ */
11309+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
11310+static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
11311+{
11312+ pmdval_t ret;
11313+ u32 *tmp = (u32 *)pmdp;
11314+
11315+ ret = (pmdval_t) (*tmp);
11316+ if (ret) {
11317+ /*
11318+ * If the low part is null, we must not read the high part
11319+ * or we can end up with a partial pmd.
11320+ */
11321+ smp_rmb();
11322+ ret |= ((pmdval_t)*(tmp + 1)) << 32;
11323+ }
11324+
11325+ return __pmd(ret);
11326+}
11327+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
11328+static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
11329+{
11330+ return __pmd(atomic64_read((atomic64_t *)pmdp));
11331+}
11332+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
11333+
11334 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11335 {
11336 set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
11337@@ -38,12 +88,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11338
11339 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11340 {
11341+ pax_open_kernel();
11342 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
11343+ pax_close_kernel();
11344 }
11345
11346 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11347 {
11348+ pax_open_kernel();
11349 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
11350+ pax_close_kernel();
11351 }
11352
11353 /*
11354diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
11355index 49afb3f..91a8c63 100644
11356--- a/arch/x86/include/asm/pgtable.h
11357+++ b/arch/x86/include/asm/pgtable.h
11358@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11359
11360 #ifndef __PAGETABLE_PUD_FOLDED
11361 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11362+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11363 #define pgd_clear(pgd) native_pgd_clear(pgd)
11364 #endif
11365
11366@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11367
11368 #define arch_end_context_switch(prev) do {} while(0)
11369
11370+#define pax_open_kernel() native_pax_open_kernel()
11371+#define pax_close_kernel() native_pax_close_kernel()
11372 #endif /* CONFIG_PARAVIRT */
11373
11374+#define __HAVE_ARCH_PAX_OPEN_KERNEL
11375+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
11376+
11377+#ifdef CONFIG_PAX_KERNEXEC
11378+static inline unsigned long native_pax_open_kernel(void)
11379+{
11380+ unsigned long cr0;
11381+
11382+ preempt_disable();
11383+ barrier();
11384+ cr0 = read_cr0() ^ X86_CR0_WP;
11385+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
11386+ write_cr0(cr0);
11387+ return cr0 ^ X86_CR0_WP;
11388+}
11389+
11390+static inline unsigned long native_pax_close_kernel(void)
11391+{
11392+ unsigned long cr0;
11393+
11394+ cr0 = read_cr0() ^ X86_CR0_WP;
11395+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11396+ write_cr0(cr0);
11397+ barrier();
11398+ preempt_enable_no_resched();
11399+ return cr0 ^ X86_CR0_WP;
11400+}
11401+#else
11402+static inline unsigned long native_pax_open_kernel(void) { return 0; }
11403+static inline unsigned long native_pax_close_kernel(void) { return 0; }
11404+#endif
11405+
11406 /*
11407 * The following only work if pte_present() is true.
11408 * Undefined behaviour if not..
11409 */
11410+static inline int pte_user(pte_t pte)
11411+{
11412+ return pte_val(pte) & _PAGE_USER;
11413+}
11414+
11415 static inline int pte_dirty(pte_t pte)
11416 {
11417 return pte_flags(pte) & _PAGE_DIRTY;
11418@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11419 return pte_clear_flags(pte, _PAGE_RW);
11420 }
11421
11422+static inline pte_t pte_mkread(pte_t pte)
11423+{
11424+ return __pte(pte_val(pte) | _PAGE_USER);
11425+}
11426+
11427 static inline pte_t pte_mkexec(pte_t pte)
11428 {
11429- return pte_clear_flags(pte, _PAGE_NX);
11430+#ifdef CONFIG_X86_PAE
11431+ if (__supported_pte_mask & _PAGE_NX)
11432+ return pte_clear_flags(pte, _PAGE_NX);
11433+ else
11434+#endif
11435+ return pte_set_flags(pte, _PAGE_USER);
11436+}
11437+
11438+static inline pte_t pte_exprotect(pte_t pte)
11439+{
11440+#ifdef CONFIG_X86_PAE
11441+ if (__supported_pte_mask & _PAGE_NX)
11442+ return pte_set_flags(pte, _PAGE_NX);
11443+ else
11444+#endif
11445+ return pte_clear_flags(pte, _PAGE_USER);
11446 }
11447
11448 static inline pte_t pte_mkdirty(pte_t pte)
11449@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11450 #endif
11451
11452 #ifndef __ASSEMBLY__
11453+
11454+#ifdef CONFIG_PAX_PER_CPU_PGD
11455+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11456+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11457+{
11458+ return cpu_pgd[cpu];
11459+}
11460+#endif
11461+
11462 #include <linux/mm_types.h>
11463
11464 static inline int pte_none(pte_t pte)
11465@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11466
11467 static inline int pgd_bad(pgd_t pgd)
11468 {
11469- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11470+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11471 }
11472
11473 static inline int pgd_none(pgd_t pgd)
11474@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
11475 * pgd_offset() returns a (pgd_t *)
11476 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11477 */
11478-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11479+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11480+
11481+#ifdef CONFIG_PAX_PER_CPU_PGD
11482+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11483+#endif
11484+
11485 /*
11486 * a shortcut which implies the use of the kernel's pgd, instead
11487 * of a process's
11488@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
11489 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11490 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11491
11492+#ifdef CONFIG_X86_32
11493+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11494+#else
11495+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11496+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11497+
11498+#ifdef CONFIG_PAX_MEMORY_UDEREF
11499+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11500+#else
11501+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11502+#endif
11503+
11504+#endif
11505+
11506 #ifndef __ASSEMBLY__
11507
11508 extern int direct_gbpages;
11509@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
11510 * dst and src can be on the same page, but the range must not overlap,
11511 * and must not cross a page boundary.
11512 */
11513-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11514+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11515 {
11516- memcpy(dst, src, count * sizeof(pgd_t));
11517+ pax_open_kernel();
11518+ while (count--)
11519+ *dst++ = *src++;
11520+ pax_close_kernel();
11521 }
11522
11523+#ifdef CONFIG_PAX_PER_CPU_PGD
11524+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
11525+#endif
11526+
11527+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11528+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
11529+#else
11530+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
11531+#endif
11532
11533 #include <asm-generic/pgtable.h>
11534 #endif /* __ASSEMBLY__ */
11535diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11536index 0c92113..34a77c6 100644
11537--- a/arch/x86/include/asm/pgtable_32.h
11538+++ b/arch/x86/include/asm/pgtable_32.h
11539@@ -25,9 +25,6 @@
11540 struct mm_struct;
11541 struct vm_area_struct;
11542
11543-extern pgd_t swapper_pg_dir[1024];
11544-extern pgd_t initial_page_table[1024];
11545-
11546 static inline void pgtable_cache_init(void) { }
11547 static inline void check_pgt_cache(void) { }
11548 void paging_init(void);
11549@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11550 # include <asm/pgtable-2level.h>
11551 #endif
11552
11553+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11554+extern pgd_t initial_page_table[PTRS_PER_PGD];
11555+#ifdef CONFIG_X86_PAE
11556+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11557+#endif
11558+
11559 #if defined(CONFIG_HIGHPTE)
11560 #define pte_offset_map(dir, address) \
11561 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
11562@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11563 /* Clear a kernel PTE and flush it from the TLB */
11564 #define kpte_clear_flush(ptep, vaddr) \
11565 do { \
11566+ pax_open_kernel(); \
11567 pte_clear(&init_mm, (vaddr), (ptep)); \
11568+ pax_close_kernel(); \
11569 __flush_tlb_one((vaddr)); \
11570 } while (0)
11571
11572@@ -74,6 +79,9 @@ do { \
11573
11574 #endif /* !__ASSEMBLY__ */
11575
11576+#define HAVE_ARCH_UNMAPPED_AREA
11577+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11578+
11579 /*
11580 * kern_addr_valid() is (1) for FLATMEM and (0) for
11581 * SPARSEMEM and DISCONTIGMEM
11582diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11583index ed5903b..c7fe163 100644
11584--- a/arch/x86/include/asm/pgtable_32_types.h
11585+++ b/arch/x86/include/asm/pgtable_32_types.h
11586@@ -8,7 +8,7 @@
11587 */
11588 #ifdef CONFIG_X86_PAE
11589 # include <asm/pgtable-3level_types.h>
11590-# define PMD_SIZE (1UL << PMD_SHIFT)
11591+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11592 # define PMD_MASK (~(PMD_SIZE - 1))
11593 #else
11594 # include <asm/pgtable-2level_types.h>
11595@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11596 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11597 #endif
11598
11599+#ifdef CONFIG_PAX_KERNEXEC
11600+#ifndef __ASSEMBLY__
11601+extern unsigned char MODULES_EXEC_VADDR[];
11602+extern unsigned char MODULES_EXEC_END[];
11603+#endif
11604+#include <asm/boot.h>
11605+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11606+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11607+#else
11608+#define ktla_ktva(addr) (addr)
11609+#define ktva_ktla(addr) (addr)
11610+#endif
11611+
11612 #define MODULES_VADDR VMALLOC_START
11613 #define MODULES_END VMALLOC_END
11614 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11615diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11616index 975f709..9f779c9 100644
11617--- a/arch/x86/include/asm/pgtable_64.h
11618+++ b/arch/x86/include/asm/pgtable_64.h
11619@@ -16,10 +16,14 @@
11620
11621 extern pud_t level3_kernel_pgt[512];
11622 extern pud_t level3_ident_pgt[512];
11623+extern pud_t level3_vmalloc_start_pgt[512];
11624+extern pud_t level3_vmalloc_end_pgt[512];
11625+extern pud_t level3_vmemmap_pgt[512];
11626+extern pud_t level2_vmemmap_pgt[512];
11627 extern pmd_t level2_kernel_pgt[512];
11628 extern pmd_t level2_fixmap_pgt[512];
11629-extern pmd_t level2_ident_pgt[512];
11630-extern pgd_t init_level4_pgt[];
11631+extern pmd_t level2_ident_pgt[512*2];
11632+extern pgd_t init_level4_pgt[512];
11633
11634 #define swapper_pg_dir init_level4_pgt
11635
11636@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11637
11638 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11639 {
11640+ pax_open_kernel();
11641 *pmdp = pmd;
11642+ pax_close_kernel();
11643 }
11644
11645 static inline void native_pmd_clear(pmd_t *pmd)
11646@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
11647
11648 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11649 {
11650+ pax_open_kernel();
11651 *pudp = pud;
11652+ pax_close_kernel();
11653 }
11654
11655 static inline void native_pud_clear(pud_t *pud)
11656@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
11657
11658 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11659 {
11660+ pax_open_kernel();
11661+ *pgdp = pgd;
11662+ pax_close_kernel();
11663+}
11664+
11665+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11666+{
11667 *pgdp = pgd;
11668 }
11669
11670diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11671index 766ea16..5b96cb3 100644
11672--- a/arch/x86/include/asm/pgtable_64_types.h
11673+++ b/arch/x86/include/asm/pgtable_64_types.h
11674@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11675 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11676 #define MODULES_END _AC(0xffffffffff000000, UL)
11677 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11678+#define MODULES_EXEC_VADDR MODULES_VADDR
11679+#define MODULES_EXEC_END MODULES_END
11680+
11681+#define ktla_ktva(addr) (addr)
11682+#define ktva_ktla(addr) (addr)
11683
11684 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11685diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11686index 013286a..8b42f4f 100644
11687--- a/arch/x86/include/asm/pgtable_types.h
11688+++ b/arch/x86/include/asm/pgtable_types.h
11689@@ -16,13 +16,12 @@
11690 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11691 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11692 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11693-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11694+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11695 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11696 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11697 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11698-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11699-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11700-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
11701+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11702+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
11703 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11704
11705 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11706@@ -40,7 +39,6 @@
11707 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11708 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11709 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11710-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11711 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11712 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11713 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11714@@ -57,8 +55,10 @@
11715
11716 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11717 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11718-#else
11719+#elif defined(CONFIG_KMEMCHECK)
11720 #define _PAGE_NX (_AT(pteval_t, 0))
11721+#else
11722+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11723 #endif
11724
11725 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11726@@ -96,6 +96,9 @@
11727 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11728 _PAGE_ACCESSED)
11729
11730+#define PAGE_READONLY_NOEXEC PAGE_READONLY
11731+#define PAGE_SHARED_NOEXEC PAGE_SHARED
11732+
11733 #define __PAGE_KERNEL_EXEC \
11734 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11735 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11736@@ -106,7 +109,7 @@
11737 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11738 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11739 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11740-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11741+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11742 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11743 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
11744 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11745@@ -168,8 +171,8 @@
11746 * bits are combined, this will alow user to access the high address mapped
11747 * VDSO in the presence of CONFIG_COMPAT_VDSO
11748 */
11749-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11750-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11751+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11752+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11753 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11754 #endif
11755
11756@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11757 {
11758 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11759 }
11760+#endif
11761
11762+#if PAGETABLE_LEVELS == 3
11763+#include <asm-generic/pgtable-nopud.h>
11764+#endif
11765+
11766+#if PAGETABLE_LEVELS == 2
11767+#include <asm-generic/pgtable-nopmd.h>
11768+#endif
11769+
11770+#ifndef __ASSEMBLY__
11771 #if PAGETABLE_LEVELS > 3
11772 typedef struct { pudval_t pud; } pud_t;
11773
11774@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11775 return pud.pud;
11776 }
11777 #else
11778-#include <asm-generic/pgtable-nopud.h>
11779-
11780 static inline pudval_t native_pud_val(pud_t pud)
11781 {
11782 return native_pgd_val(pud.pgd);
11783@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11784 return pmd.pmd;
11785 }
11786 #else
11787-#include <asm-generic/pgtable-nopmd.h>
11788-
11789 static inline pmdval_t native_pmd_val(pmd_t pmd)
11790 {
11791 return native_pgd_val(pmd.pud.pgd);
11792@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
11793
11794 extern pteval_t __supported_pte_mask;
11795 extern void set_nx(void);
11796-extern int nx_enabled;
11797
11798 #define pgprot_writecombine pgprot_writecombine
11799 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11800diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11801index 4fa7dcc..764e33a 100644
11802--- a/arch/x86/include/asm/processor.h
11803+++ b/arch/x86/include/asm/processor.h
11804@@ -276,7 +276,7 @@ struct tss_struct {
11805
11806 } ____cacheline_aligned;
11807
11808-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11809+extern struct tss_struct init_tss[NR_CPUS];
11810
11811 /*
11812 * Save the original ist values for checking stack pointers during debugging
11813@@ -807,11 +807,18 @@ static inline void spin_lock_prefetch(const void *x)
11814 */
11815 #define TASK_SIZE PAGE_OFFSET
11816 #define TASK_SIZE_MAX TASK_SIZE
11817+
11818+#ifdef CONFIG_PAX_SEGMEXEC
11819+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11820+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11821+#else
11822 #define STACK_TOP TASK_SIZE
11823-#define STACK_TOP_MAX STACK_TOP
11824+#endif
11825+
11826+#define STACK_TOP_MAX TASK_SIZE
11827
11828 #define INIT_THREAD { \
11829- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11830+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11831 .vm86_info = NULL, \
11832 .sysenter_cs = __KERNEL_CS, \
11833 .io_bitmap_ptr = NULL, \
11834@@ -825,7 +832,7 @@ static inline void spin_lock_prefetch(const void *x)
11835 */
11836 #define INIT_TSS { \
11837 .x86_tss = { \
11838- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11839+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11840 .ss0 = __KERNEL_DS, \
11841 .ss1 = __KERNEL_CS, \
11842 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11843@@ -836,11 +843,7 @@ static inline void spin_lock_prefetch(const void *x)
11844 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11845
11846 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11847-#define KSTK_TOP(info) \
11848-({ \
11849- unsigned long *__ptr = (unsigned long *)(info); \
11850- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11851-})
11852+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11853
11854 /*
11855 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11856@@ -855,7 +858,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11857 #define task_pt_regs(task) \
11858 ({ \
11859 struct pt_regs *__regs__; \
11860- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11861+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11862 __regs__ - 1; \
11863 })
11864
11865@@ -865,13 +868,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11866 /*
11867 * User space process size. 47bits minus one guard page.
11868 */
11869-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11870+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11871
11872 /* This decides where the kernel will search for a free chunk of vm
11873 * space during mmap's.
11874 */
11875 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11876- 0xc0000000 : 0xFFFFe000)
11877+ 0xc0000000 : 0xFFFFf000)
11878
11879 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
11880 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11881@@ -882,11 +885,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11882 #define STACK_TOP_MAX TASK_SIZE_MAX
11883
11884 #define INIT_THREAD { \
11885- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11886+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11887 }
11888
11889 #define INIT_TSS { \
11890- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11891+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11892 }
11893
11894 /*
11895@@ -914,6 +917,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11896 */
11897 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11898
11899+#ifdef CONFIG_PAX_SEGMEXEC
11900+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11901+#endif
11902+
11903 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11904
11905 /* Get/set a process' ability to use the timestamp counter instruction */
11906@@ -976,12 +983,12 @@ extern bool cpu_has_amd_erratum(const int *);
11907
11908 void cpu_idle_wait(void);
11909
11910-extern unsigned long arch_align_stack(unsigned long sp);
11911+#define arch_align_stack(x) ((x) & ~0xfUL)
11912 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11913
11914 void default_idle(void);
11915 bool set_pm_idle_to_default(void);
11916
11917-void stop_this_cpu(void *dummy);
11918+void stop_this_cpu(void *dummy) __noreturn;
11919
11920 #endif /* _ASM_X86_PROCESSOR_H */
11921diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11922index dcfde52..dbfea06 100644
11923--- a/arch/x86/include/asm/ptrace.h
11924+++ b/arch/x86/include/asm/ptrace.h
11925@@ -155,28 +155,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11926 }
11927
11928 /*
11929- * user_mode_vm(regs) determines whether a register set came from user mode.
11930+ * user_mode(regs) determines whether a register set came from user mode.
11931 * This is true if V8086 mode was enabled OR if the register set was from
11932 * protected mode with RPL-3 CS value. This tricky test checks that with
11933 * one comparison. Many places in the kernel can bypass this full check
11934- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11935+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11936+ * be used.
11937 */
11938-static inline int user_mode(struct pt_regs *regs)
11939+static inline int user_mode_novm(struct pt_regs *regs)
11940 {
11941 #ifdef CONFIG_X86_32
11942 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11943 #else
11944- return !!(regs->cs & 3);
11945+ return !!(regs->cs & SEGMENT_RPL_MASK);
11946 #endif
11947 }
11948
11949-static inline int user_mode_vm(struct pt_regs *regs)
11950+static inline int user_mode(struct pt_regs *regs)
11951 {
11952 #ifdef CONFIG_X86_32
11953 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11954 USER_RPL;
11955 #else
11956- return user_mode(regs);
11957+ return user_mode_novm(regs);
11958 #endif
11959 }
11960
11961@@ -192,15 +193,16 @@ static inline int v8086_mode(struct pt_regs *regs)
11962 #ifdef CONFIG_X86_64
11963 static inline bool user_64bit_mode(struct pt_regs *regs)
11964 {
11965+ unsigned long cs = regs->cs & 0xffff;
11966 #ifndef CONFIG_PARAVIRT
11967 /*
11968 * On non-paravirt systems, this is the only long mode CPL 3
11969 * selector. We do not allow long mode selectors in the LDT.
11970 */
11971- return regs->cs == __USER_CS;
11972+ return cs == __USER_CS;
11973 #else
11974 /* Headers are too twisted for this to go in paravirt.h. */
11975- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
11976+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
11977 #endif
11978 }
11979 #endif
11980diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11981index 92f29706..a79cbbb 100644
11982--- a/arch/x86/include/asm/reboot.h
11983+++ b/arch/x86/include/asm/reboot.h
11984@@ -6,19 +6,19 @@
11985 struct pt_regs;
11986
11987 struct machine_ops {
11988- void (*restart)(char *cmd);
11989- void (*halt)(void);
11990- void (*power_off)(void);
11991+ void (* __noreturn restart)(char *cmd);
11992+ void (* __noreturn halt)(void);
11993+ void (* __noreturn power_off)(void);
11994 void (*shutdown)(void);
11995 void (*crash_shutdown)(struct pt_regs *);
11996- void (*emergency_restart)(void);
11997-};
11998+ void (* __noreturn emergency_restart)(void);
11999+} __no_const;
12000
12001 extern struct machine_ops machine_ops;
12002
12003 void native_machine_crash_shutdown(struct pt_regs *regs);
12004 void native_machine_shutdown(void);
12005-void machine_real_restart(unsigned int type);
12006+void machine_real_restart(unsigned int type) __noreturn;
12007 /* These must match dispatch_table in reboot_32.S */
12008 #define MRR_BIOS 0
12009 #define MRR_APM 1
12010diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
12011index 2dbe4a7..ce1db00 100644
12012--- a/arch/x86/include/asm/rwsem.h
12013+++ b/arch/x86/include/asm/rwsem.h
12014@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
12015 {
12016 asm volatile("# beginning down_read\n\t"
12017 LOCK_PREFIX _ASM_INC "(%1)\n\t"
12018+
12019+#ifdef CONFIG_PAX_REFCOUNT
12020+ "jno 0f\n"
12021+ LOCK_PREFIX _ASM_DEC "(%1)\n"
12022+ "int $4\n0:\n"
12023+ _ASM_EXTABLE(0b, 0b)
12024+#endif
12025+
12026 /* adds 0x00000001 */
12027 " jns 1f\n"
12028 " call call_rwsem_down_read_failed\n"
12029@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
12030 "1:\n\t"
12031 " mov %1,%2\n\t"
12032 " add %3,%2\n\t"
12033+
12034+#ifdef CONFIG_PAX_REFCOUNT
12035+ "jno 0f\n"
12036+ "sub %3,%2\n"
12037+ "int $4\n0:\n"
12038+ _ASM_EXTABLE(0b, 0b)
12039+#endif
12040+
12041 " jle 2f\n\t"
12042 LOCK_PREFIX " cmpxchg %2,%0\n\t"
12043 " jnz 1b\n\t"
12044@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
12045 long tmp;
12046 asm volatile("# beginning down_write\n\t"
12047 LOCK_PREFIX " xadd %1,(%2)\n\t"
12048+
12049+#ifdef CONFIG_PAX_REFCOUNT
12050+ "jno 0f\n"
12051+ "mov %1,(%2)\n"
12052+ "int $4\n0:\n"
12053+ _ASM_EXTABLE(0b, 0b)
12054+#endif
12055+
12056 /* adds 0xffff0001, returns the old value */
12057 " test %1,%1\n\t"
12058 /* was the count 0 before? */
12059@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
12060 long tmp;
12061 asm volatile("# beginning __up_read\n\t"
12062 LOCK_PREFIX " xadd %1,(%2)\n\t"
12063+
12064+#ifdef CONFIG_PAX_REFCOUNT
12065+ "jno 0f\n"
12066+ "mov %1,(%2)\n"
12067+ "int $4\n0:\n"
12068+ _ASM_EXTABLE(0b, 0b)
12069+#endif
12070+
12071 /* subtracts 1, returns the old value */
12072 " jns 1f\n\t"
12073 " call call_rwsem_wake\n" /* expects old value in %edx */
12074@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
12075 long tmp;
12076 asm volatile("# beginning __up_write\n\t"
12077 LOCK_PREFIX " xadd %1,(%2)\n\t"
12078+
12079+#ifdef CONFIG_PAX_REFCOUNT
12080+ "jno 0f\n"
12081+ "mov %1,(%2)\n"
12082+ "int $4\n0:\n"
12083+ _ASM_EXTABLE(0b, 0b)
12084+#endif
12085+
12086 /* subtracts 0xffff0001, returns the old value */
12087 " jns 1f\n\t"
12088 " call call_rwsem_wake\n" /* expects old value in %edx */
12089@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12090 {
12091 asm volatile("# beginning __downgrade_write\n\t"
12092 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
12093+
12094+#ifdef CONFIG_PAX_REFCOUNT
12095+ "jno 0f\n"
12096+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
12097+ "int $4\n0:\n"
12098+ _ASM_EXTABLE(0b, 0b)
12099+#endif
12100+
12101 /*
12102 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
12103 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
12104@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12105 */
12106 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12107 {
12108- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
12109+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
12110+
12111+#ifdef CONFIG_PAX_REFCOUNT
12112+ "jno 0f\n"
12113+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
12114+ "int $4\n0:\n"
12115+ _ASM_EXTABLE(0b, 0b)
12116+#endif
12117+
12118 : "+m" (sem->count)
12119 : "er" (delta));
12120 }
12121@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12122 */
12123 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
12124 {
12125- return delta + xadd(&sem->count, delta);
12126+ return delta + xadd_check_overflow(&sem->count, delta);
12127 }
12128
12129 #endif /* __KERNEL__ */
12130diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
12131index 1654662..5af4157 100644
12132--- a/arch/x86/include/asm/segment.h
12133+++ b/arch/x86/include/asm/segment.h
12134@@ -64,10 +64,15 @@
12135 * 26 - ESPFIX small SS
12136 * 27 - per-cpu [ offset to per-cpu data area ]
12137 * 28 - stack_canary-20 [ for stack protector ]
12138- * 29 - unused
12139- * 30 - unused
12140+ * 29 - PCI BIOS CS
12141+ * 30 - PCI BIOS DS
12142 * 31 - TSS for double fault handler
12143 */
12144+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
12145+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
12146+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
12147+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
12148+
12149 #define GDT_ENTRY_TLS_MIN 6
12150 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
12151
12152@@ -79,6 +84,8 @@
12153
12154 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
12155
12156+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
12157+
12158 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
12159
12160 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
12161@@ -104,6 +111,12 @@
12162 #define __KERNEL_STACK_CANARY 0
12163 #endif
12164
12165+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
12166+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
12167+
12168+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
12169+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
12170+
12171 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
12172
12173 /*
12174@@ -141,7 +154,7 @@
12175 */
12176
12177 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12178-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12179+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12180
12181
12182 #else
12183@@ -165,6 +178,8 @@
12184 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
12185 #define __USER32_DS __USER_DS
12186
12187+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12188+
12189 #define GDT_ENTRY_TSS 8 /* needs two entries */
12190 #define GDT_ENTRY_LDT 10 /* needs two entries */
12191 #define GDT_ENTRY_TLS_MIN 12
12192@@ -185,6 +200,7 @@
12193 #endif
12194
12195 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
12196+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
12197 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
12198 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
12199 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
12200@@ -263,7 +279,7 @@ static inline unsigned long get_limit(unsigned long segment)
12201 {
12202 unsigned long __limit;
12203 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12204- return __limit + 1;
12205+ return __limit;
12206 }
12207
12208 #endif /* !__ASSEMBLY__ */
12209diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12210index 0434c40..1714bf0 100644
12211--- a/arch/x86/include/asm/smp.h
12212+++ b/arch/x86/include/asm/smp.h
12213@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12214 /* cpus sharing the last level cache: */
12215 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
12216 DECLARE_PER_CPU(u16, cpu_llc_id);
12217-DECLARE_PER_CPU(int, cpu_number);
12218+DECLARE_PER_CPU(unsigned int, cpu_number);
12219
12220 static inline struct cpumask *cpu_sibling_mask(int cpu)
12221 {
12222@@ -77,7 +77,7 @@ struct smp_ops {
12223
12224 void (*send_call_func_ipi)(const struct cpumask *mask);
12225 void (*send_call_func_single_ipi)(int cpu);
12226-};
12227+} __no_const;
12228
12229 /* Globals due to paravirt */
12230 extern void set_cpu_sibling_map(int cpu);
12231@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12232 extern int safe_smp_processor_id(void);
12233
12234 #elif defined(CONFIG_X86_64_SMP)
12235-#define raw_smp_processor_id() (percpu_read(cpu_number))
12236-
12237-#define stack_smp_processor_id() \
12238-({ \
12239- struct thread_info *ti; \
12240- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12241- ti->cpu; \
12242-})
12243+#define raw_smp_processor_id() (percpu_read(cpu_number))
12244+#define stack_smp_processor_id() raw_smp_processor_id()
12245 #define safe_smp_processor_id() smp_processor_id()
12246
12247 #endif
12248diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12249index 76bfa2c..12d3fe7 100644
12250--- a/arch/x86/include/asm/spinlock.h
12251+++ b/arch/x86/include/asm/spinlock.h
12252@@ -175,6 +175,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
12253 static inline void arch_read_lock(arch_rwlock_t *rw)
12254 {
12255 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
12256+
12257+#ifdef CONFIG_PAX_REFCOUNT
12258+ "jno 0f\n"
12259+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
12260+ "int $4\n0:\n"
12261+ _ASM_EXTABLE(0b, 0b)
12262+#endif
12263+
12264 "jns 1f\n"
12265 "call __read_lock_failed\n\t"
12266 "1:\n"
12267@@ -184,6 +192,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
12268 static inline void arch_write_lock(arch_rwlock_t *rw)
12269 {
12270 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
12271+
12272+#ifdef CONFIG_PAX_REFCOUNT
12273+ "jno 0f\n"
12274+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
12275+ "int $4\n0:\n"
12276+ _ASM_EXTABLE(0b, 0b)
12277+#endif
12278+
12279 "jz 1f\n"
12280 "call __write_lock_failed\n\t"
12281 "1:\n"
12282@@ -213,13 +229,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
12283
12284 static inline void arch_read_unlock(arch_rwlock_t *rw)
12285 {
12286- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
12287+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
12288+
12289+#ifdef CONFIG_PAX_REFCOUNT
12290+ "jno 0f\n"
12291+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
12292+ "int $4\n0:\n"
12293+ _ASM_EXTABLE(0b, 0b)
12294+#endif
12295+
12296 :"+m" (rw->lock) : : "memory");
12297 }
12298
12299 static inline void arch_write_unlock(arch_rwlock_t *rw)
12300 {
12301- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
12302+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
12303+
12304+#ifdef CONFIG_PAX_REFCOUNT
12305+ "jno 0f\n"
12306+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
12307+ "int $4\n0:\n"
12308+ _ASM_EXTABLE(0b, 0b)
12309+#endif
12310+
12311 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
12312 }
12313
12314diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
12315index b5d9533..41655fa 100644
12316--- a/arch/x86/include/asm/stackprotector.h
12317+++ b/arch/x86/include/asm/stackprotector.h
12318@@ -47,7 +47,7 @@
12319 * head_32 for boot CPU and setup_per_cpu_areas() for others.
12320 */
12321 #define GDT_STACK_CANARY_INIT \
12322- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
12323+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
12324
12325 /*
12326 * Initialize the stackprotector canary value.
12327@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
12328
12329 static inline void load_stack_canary_segment(void)
12330 {
12331-#ifdef CONFIG_X86_32
12332+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12333 asm volatile ("mov %0, %%gs" : : "r" (0));
12334 #endif
12335 }
12336diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
12337index 70bbe39..4ae2bd4 100644
12338--- a/arch/x86/include/asm/stacktrace.h
12339+++ b/arch/x86/include/asm/stacktrace.h
12340@@ -11,28 +11,20 @@
12341
12342 extern int kstack_depth_to_print;
12343
12344-struct thread_info;
12345+struct task_struct;
12346 struct stacktrace_ops;
12347
12348-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
12349- unsigned long *stack,
12350- unsigned long bp,
12351- const struct stacktrace_ops *ops,
12352- void *data,
12353- unsigned long *end,
12354- int *graph);
12355+typedef unsigned long walk_stack_t(struct task_struct *task,
12356+ void *stack_start,
12357+ unsigned long *stack,
12358+ unsigned long bp,
12359+ const struct stacktrace_ops *ops,
12360+ void *data,
12361+ unsigned long *end,
12362+ int *graph);
12363
12364-extern unsigned long
12365-print_context_stack(struct thread_info *tinfo,
12366- unsigned long *stack, unsigned long bp,
12367- const struct stacktrace_ops *ops, void *data,
12368- unsigned long *end, int *graph);
12369-
12370-extern unsigned long
12371-print_context_stack_bp(struct thread_info *tinfo,
12372- unsigned long *stack, unsigned long bp,
12373- const struct stacktrace_ops *ops, void *data,
12374- unsigned long *end, int *graph);
12375+extern walk_stack_t print_context_stack;
12376+extern walk_stack_t print_context_stack_bp;
12377
12378 /* Generic stack tracer with callbacks */
12379
12380@@ -40,7 +32,7 @@ struct stacktrace_ops {
12381 void (*address)(void *data, unsigned long address, int reliable);
12382 /* On negative return stop dumping */
12383 int (*stack)(void *data, char *name);
12384- walk_stack_t walk_stack;
12385+ walk_stack_t *walk_stack;
12386 };
12387
12388 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
12389diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
12390index 4ec45b3..a4f0a8a 100644
12391--- a/arch/x86/include/asm/switch_to.h
12392+++ b/arch/x86/include/asm/switch_to.h
12393@@ -108,7 +108,7 @@ do { \
12394 "call __switch_to\n\t" \
12395 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12396 __switch_canary \
12397- "movq %P[thread_info](%%rsi),%%r8\n\t" \
12398+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12399 "movq %%rax,%%rdi\n\t" \
12400 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12401 "jnz ret_from_fork\n\t" \
12402@@ -119,7 +119,7 @@ do { \
12403 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12404 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12405 [_tif_fork] "i" (_TIF_FORK), \
12406- [thread_info] "i" (offsetof(struct task_struct, stack)), \
12407+ [thread_info] "m" (current_tinfo), \
12408 [current_task] "m" (current_task) \
12409 __switch_canary_iparam \
12410 : "memory", "cc" __EXTRA_CLOBBER)
12411diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
12412index 3fda9db4..4ca1c61 100644
12413--- a/arch/x86/include/asm/sys_ia32.h
12414+++ b/arch/x86/include/asm/sys_ia32.h
12415@@ -40,7 +40,7 @@ asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *,
12416 struct old_sigaction32 __user *);
12417 asmlinkage long sys32_alarm(unsigned int);
12418
12419-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
12420+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
12421 asmlinkage long sys32_sysfs(int, u32, u32);
12422
12423 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
12424diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
12425index ad6df8c..5e0cf6e 100644
12426--- a/arch/x86/include/asm/thread_info.h
12427+++ b/arch/x86/include/asm/thread_info.h
12428@@ -10,6 +10,7 @@
12429 #include <linux/compiler.h>
12430 #include <asm/page.h>
12431 #include <asm/types.h>
12432+#include <asm/percpu.h>
12433
12434 /*
12435 * low level task data that entry.S needs immediate access to
12436@@ -24,7 +25,6 @@ struct exec_domain;
12437 #include <linux/atomic.h>
12438
12439 struct thread_info {
12440- struct task_struct *task; /* main task structure */
12441 struct exec_domain *exec_domain; /* execution domain */
12442 __u32 flags; /* low level flags */
12443 __u32 status; /* thread synchronous flags */
12444@@ -34,19 +34,13 @@ struct thread_info {
12445 mm_segment_t addr_limit;
12446 struct restart_block restart_block;
12447 void __user *sysenter_return;
12448-#ifdef CONFIG_X86_32
12449- unsigned long previous_esp; /* ESP of the previous stack in
12450- case of nested (IRQ) stacks
12451- */
12452- __u8 supervisor_stack[0];
12453-#endif
12454+ unsigned long lowest_stack;
12455 unsigned int sig_on_uaccess_error:1;
12456 unsigned int uaccess_err:1; /* uaccess failed */
12457 };
12458
12459-#define INIT_THREAD_INFO(tsk) \
12460+#define INIT_THREAD_INFO \
12461 { \
12462- .task = &tsk, \
12463 .exec_domain = &default_exec_domain, \
12464 .flags = 0, \
12465 .cpu = 0, \
12466@@ -57,7 +51,7 @@ struct thread_info {
12467 }, \
12468 }
12469
12470-#define init_thread_info (init_thread_union.thread_info)
12471+#define init_thread_info (init_thread_union.stack)
12472 #define init_stack (init_thread_union.stack)
12473
12474 #else /* !__ASSEMBLY__ */
12475@@ -97,6 +91,7 @@ struct thread_info {
12476 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
12477 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
12478 #define TIF_X32 30 /* 32-bit native x86-64 binary */
12479+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
12480
12481 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
12482 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
12483@@ -120,16 +115,18 @@ struct thread_info {
12484 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
12485 #define _TIF_ADDR32 (1 << TIF_ADDR32)
12486 #define _TIF_X32 (1 << TIF_X32)
12487+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
12488
12489 /* work to do in syscall_trace_enter() */
12490 #define _TIF_WORK_SYSCALL_ENTRY \
12491 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
12492- _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
12493+ _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
12494+ _TIF_GRSEC_SETXID)
12495
12496 /* work to do in syscall_trace_leave() */
12497 #define _TIF_WORK_SYSCALL_EXIT \
12498 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
12499- _TIF_SYSCALL_TRACEPOINT)
12500+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
12501
12502 /* work to do on interrupt/exception return */
12503 #define _TIF_WORK_MASK \
12504@@ -139,7 +136,8 @@ struct thread_info {
12505
12506 /* work to do on any return to user space */
12507 #define _TIF_ALLWORK_MASK \
12508- ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
12509+ ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
12510+ _TIF_GRSEC_SETXID)
12511
12512 /* Only used for 64 bit */
12513 #define _TIF_DO_NOTIFY_MASK \
12514@@ -173,45 +171,40 @@ struct thread_info {
12515 ret; \
12516 })
12517
12518-#ifdef CONFIG_X86_32
12519-
12520-#define STACK_WARN (THREAD_SIZE/8)
12521-/*
12522- * macros/functions for gaining access to the thread information structure
12523- *
12524- * preempt_count needs to be 1 initially, until the scheduler is functional.
12525- */
12526-#ifndef __ASSEMBLY__
12527-
12528-
12529-/* how to get the current stack pointer from C */
12530-register unsigned long current_stack_pointer asm("esp") __used;
12531-
12532-/* how to get the thread information struct from C */
12533-static inline struct thread_info *current_thread_info(void)
12534-{
12535- return (struct thread_info *)
12536- (current_stack_pointer & ~(THREAD_SIZE - 1));
12537-}
12538-
12539-#else /* !__ASSEMBLY__ */
12540-
12541+#ifdef __ASSEMBLY__
12542 /* how to get the thread information struct from ASM */
12543 #define GET_THREAD_INFO(reg) \
12544- movl $-THREAD_SIZE, reg; \
12545- andl %esp, reg
12546+ mov PER_CPU_VAR(current_tinfo), reg
12547
12548 /* use this one if reg already contains %esp */
12549-#define GET_THREAD_INFO_WITH_ESP(reg) \
12550- andl $-THREAD_SIZE, reg
12551+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12552+#else
12553+/* how to get the thread information struct from C */
12554+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12555+
12556+static __always_inline struct thread_info *current_thread_info(void)
12557+{
12558+ return percpu_read_stable(current_tinfo);
12559+}
12560+#endif
12561+
12562+#ifdef CONFIG_X86_32
12563+
12564+#define STACK_WARN (THREAD_SIZE/8)
12565+/*
12566+ * macros/functions for gaining access to the thread information structure
12567+ *
12568+ * preempt_count needs to be 1 initially, until the scheduler is functional.
12569+ */
12570+#ifndef __ASSEMBLY__
12571+
12572+/* how to get the current stack pointer from C */
12573+register unsigned long current_stack_pointer asm("esp") __used;
12574
12575 #endif
12576
12577 #else /* X86_32 */
12578
12579-#include <asm/percpu.h>
12580-#define KERNEL_STACK_OFFSET (5*8)
12581-
12582 /*
12583 * macros/functions for gaining access to the thread information structure
12584 * preempt_count needs to be 1 initially, until the scheduler is functional.
12585@@ -219,27 +212,8 @@ static inline struct thread_info *current_thread_info(void)
12586 #ifndef __ASSEMBLY__
12587 DECLARE_PER_CPU(unsigned long, kernel_stack);
12588
12589-static inline struct thread_info *current_thread_info(void)
12590-{
12591- struct thread_info *ti;
12592- ti = (void *)(percpu_read_stable(kernel_stack) +
12593- KERNEL_STACK_OFFSET - THREAD_SIZE);
12594- return ti;
12595-}
12596-
12597-#else /* !__ASSEMBLY__ */
12598-
12599-/* how to get the thread information struct from ASM */
12600-#define GET_THREAD_INFO(reg) \
12601- movq PER_CPU_VAR(kernel_stack),reg ; \
12602- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12603-
12604-/*
12605- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
12606- * a certain register (to be used in assembler memory operands).
12607- */
12608-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
12609-
12610+/* how to get the current stack pointer from C */
12611+register unsigned long current_stack_pointer asm("rsp") __used;
12612 #endif
12613
12614 #endif /* !X86_32 */
12615@@ -285,5 +259,16 @@ extern void arch_task_cache_init(void);
12616 extern void free_thread_info(struct thread_info *ti);
12617 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12618 #define arch_task_cache_init arch_task_cache_init
12619+
12620+#define __HAVE_THREAD_FUNCTIONS
12621+#define task_thread_info(task) (&(task)->tinfo)
12622+#define task_stack_page(task) ((task)->stack)
12623+#define setup_thread_stack(p, org) do {} while (0)
12624+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12625+
12626+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12627+extern struct task_struct *alloc_task_struct_node(int node);
12628+extern void free_task_struct(struct task_struct *);
12629+
12630 #endif
12631 #endif /* _ASM_X86_THREAD_INFO_H */
12632diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12633index e054459..14bc8a7 100644
12634--- a/arch/x86/include/asm/uaccess.h
12635+++ b/arch/x86/include/asm/uaccess.h
12636@@ -7,12 +7,15 @@
12637 #include <linux/compiler.h>
12638 #include <linux/thread_info.h>
12639 #include <linux/string.h>
12640+#include <linux/sched.h>
12641 #include <asm/asm.h>
12642 #include <asm/page.h>
12643
12644 #define VERIFY_READ 0
12645 #define VERIFY_WRITE 1
12646
12647+extern void check_object_size(const void *ptr, unsigned long n, bool to);
12648+
12649 /*
12650 * The fs value determines whether argument validity checking should be
12651 * performed or not. If get_fs() == USER_DS, checking is performed, with
12652@@ -28,7 +31,12 @@
12653
12654 #define get_ds() (KERNEL_DS)
12655 #define get_fs() (current_thread_info()->addr_limit)
12656+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12657+void __set_fs(mm_segment_t x);
12658+void set_fs(mm_segment_t x);
12659+#else
12660 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12661+#endif
12662
12663 #define segment_eq(a, b) ((a).seg == (b).seg)
12664
12665@@ -76,7 +84,33 @@
12666 * checks that the pointer is in the user space range - after calling
12667 * this function, memory access functions may still return -EFAULT.
12668 */
12669-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12670+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12671+#define access_ok(type, addr, size) \
12672+({ \
12673+ long __size = size; \
12674+ unsigned long __addr = (unsigned long)addr; \
12675+ unsigned long __addr_ao = __addr & PAGE_MASK; \
12676+ unsigned long __end_ao = __addr + __size - 1; \
12677+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12678+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12679+ while(__addr_ao <= __end_ao) { \
12680+ char __c_ao; \
12681+ __addr_ao += PAGE_SIZE; \
12682+ if (__size > PAGE_SIZE) \
12683+ cond_resched(); \
12684+ if (__get_user(__c_ao, (char __user *)__addr)) \
12685+ break; \
12686+ if (type != VERIFY_WRITE) { \
12687+ __addr = __addr_ao; \
12688+ continue; \
12689+ } \
12690+ if (__put_user(__c_ao, (char __user *)__addr)) \
12691+ break; \
12692+ __addr = __addr_ao; \
12693+ } \
12694+ } \
12695+ __ret_ao; \
12696+})
12697
12698 /*
12699 * The exception table consists of pairs of addresses: the first is the
12700@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
12701 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12702 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12703
12704-
12705+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12706+#define __copyuser_seg "gs;"
12707+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12708+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12709+#else
12710+#define __copyuser_seg
12711+#define __COPYUSER_SET_ES
12712+#define __COPYUSER_RESTORE_ES
12713+#endif
12714
12715 #ifdef CONFIG_X86_32
12716 #define __put_user_asm_u64(x, addr, err, errret) \
12717- asm volatile("1: movl %%eax,0(%2)\n" \
12718- "2: movl %%edx,4(%2)\n" \
12719+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12720+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12721 "3:\n" \
12722 ".section .fixup,\"ax\"\n" \
12723 "4: movl %3,%0\n" \
12724@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
12725 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12726
12727 #define __put_user_asm_ex_u64(x, addr) \
12728- asm volatile("1: movl %%eax,0(%1)\n" \
12729- "2: movl %%edx,4(%1)\n" \
12730+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12731+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12732 "3:\n" \
12733 _ASM_EXTABLE(1b, 2b - 1b) \
12734 _ASM_EXTABLE(2b, 3b - 2b) \
12735@@ -252,7 +294,7 @@ extern void __put_user_8(void);
12736 __typeof__(*(ptr)) __pu_val; \
12737 __chk_user_ptr(ptr); \
12738 might_fault(); \
12739- __pu_val = x; \
12740+ __pu_val = (x); \
12741 switch (sizeof(*(ptr))) { \
12742 case 1: \
12743 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12744@@ -373,7 +415,7 @@ do { \
12745 } while (0)
12746
12747 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12748- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12749+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12750 "2:\n" \
12751 ".section .fixup,\"ax\"\n" \
12752 "3: mov %3,%0\n" \
12753@@ -381,7 +423,7 @@ do { \
12754 " jmp 2b\n" \
12755 ".previous\n" \
12756 _ASM_EXTABLE(1b, 3b) \
12757- : "=r" (err), ltype(x) \
12758+ : "=r" (err), ltype (x) \
12759 : "m" (__m(addr)), "i" (errret), "0" (err))
12760
12761 #define __get_user_size_ex(x, ptr, size) \
12762@@ -406,7 +448,7 @@ do { \
12763 } while (0)
12764
12765 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12766- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12767+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12768 "2:\n" \
12769 _ASM_EXTABLE(1b, 2b - 1b) \
12770 : ltype(x) : "m" (__m(addr)))
12771@@ -423,13 +465,24 @@ do { \
12772 int __gu_err; \
12773 unsigned long __gu_val; \
12774 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12775- (x) = (__force __typeof__(*(ptr)))__gu_val; \
12776+ (x) = (__typeof__(*(ptr)))__gu_val; \
12777 __gu_err; \
12778 })
12779
12780 /* FIXME: this hack is definitely wrong -AK */
12781 struct __large_struct { unsigned long buf[100]; };
12782-#define __m(x) (*(struct __large_struct __user *)(x))
12783+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12784+#define ____m(x) \
12785+({ \
12786+ unsigned long ____x = (unsigned long)(x); \
12787+ if (____x < PAX_USER_SHADOW_BASE) \
12788+ ____x += PAX_USER_SHADOW_BASE; \
12789+ (void __user *)____x; \
12790+})
12791+#else
12792+#define ____m(x) (x)
12793+#endif
12794+#define __m(x) (*(struct __large_struct __user *)____m(x))
12795
12796 /*
12797 * Tell gcc we read from memory instead of writing: this is because
12798@@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
12799 * aliasing issues.
12800 */
12801 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12802- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12803+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12804 "2:\n" \
12805 ".section .fixup,\"ax\"\n" \
12806 "3: mov %3,%0\n" \
12807@@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
12808 ".previous\n" \
12809 _ASM_EXTABLE(1b, 3b) \
12810 : "=r"(err) \
12811- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12812+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12813
12814 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12815- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12816+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12817 "2:\n" \
12818 _ASM_EXTABLE(1b, 2b - 1b) \
12819 : : ltype(x), "m" (__m(addr)))
12820@@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
12821 * On error, the variable @x is set to zero.
12822 */
12823
12824+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12825+#define __get_user(x, ptr) get_user((x), (ptr))
12826+#else
12827 #define __get_user(x, ptr) \
12828 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12829+#endif
12830
12831 /**
12832 * __put_user: - Write a simple value into user space, with less checking.
12833@@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
12834 * Returns zero on success, or -EFAULT on error.
12835 */
12836
12837+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12838+#define __put_user(x, ptr) put_user((x), (ptr))
12839+#else
12840 #define __put_user(x, ptr) \
12841 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12842+#endif
12843
12844 #define __get_user_unaligned __get_user
12845 #define __put_user_unaligned __put_user
12846@@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
12847 #define get_user_ex(x, ptr) do { \
12848 unsigned long __gue_val; \
12849 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12850- (x) = (__force __typeof__(*(ptr)))__gue_val; \
12851+ (x) = (__typeof__(*(ptr)))__gue_val; \
12852 } while (0)
12853
12854 #ifdef CONFIG_X86_WP_WORKS_OK
12855diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12856index 8084bc7..3d6ec37 100644
12857--- a/arch/x86/include/asm/uaccess_32.h
12858+++ b/arch/x86/include/asm/uaccess_32.h
12859@@ -11,15 +11,15 @@
12860 #include <asm/page.h>
12861
12862 unsigned long __must_check __copy_to_user_ll
12863- (void __user *to, const void *from, unsigned long n);
12864+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
12865 unsigned long __must_check __copy_from_user_ll
12866- (void *to, const void __user *from, unsigned long n);
12867+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12868 unsigned long __must_check __copy_from_user_ll_nozero
12869- (void *to, const void __user *from, unsigned long n);
12870+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12871 unsigned long __must_check __copy_from_user_ll_nocache
12872- (void *to, const void __user *from, unsigned long n);
12873+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12874 unsigned long __must_check __copy_from_user_ll_nocache_nozero
12875- (void *to, const void __user *from, unsigned long n);
12876+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12877
12878 /**
12879 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
12880@@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12881 static __always_inline unsigned long __must_check
12882 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12883 {
12884+ if ((long)n < 0)
12885+ return n;
12886+
12887 if (__builtin_constant_p(n)) {
12888 unsigned long ret;
12889
12890@@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12891 return ret;
12892 }
12893 }
12894+ if (!__builtin_constant_p(n))
12895+ check_object_size(from, n, true);
12896 return __copy_to_user_ll(to, from, n);
12897 }
12898
12899@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
12900 __copy_to_user(void __user *to, const void *from, unsigned long n)
12901 {
12902 might_fault();
12903+
12904 return __copy_to_user_inatomic(to, from, n);
12905 }
12906
12907 static __always_inline unsigned long
12908 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12909 {
12910+ if ((long)n < 0)
12911+ return n;
12912+
12913 /* Avoid zeroing the tail if the copy fails..
12914 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12915 * but as the zeroing behaviour is only significant when n is not
12916@@ -137,6 +146,10 @@ static __always_inline unsigned long
12917 __copy_from_user(void *to, const void __user *from, unsigned long n)
12918 {
12919 might_fault();
12920+
12921+ if ((long)n < 0)
12922+ return n;
12923+
12924 if (__builtin_constant_p(n)) {
12925 unsigned long ret;
12926
12927@@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12928 return ret;
12929 }
12930 }
12931+ if (!__builtin_constant_p(n))
12932+ check_object_size(to, n, false);
12933 return __copy_from_user_ll(to, from, n);
12934 }
12935
12936@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12937 const void __user *from, unsigned long n)
12938 {
12939 might_fault();
12940+
12941+ if ((long)n < 0)
12942+ return n;
12943+
12944 if (__builtin_constant_p(n)) {
12945 unsigned long ret;
12946
12947@@ -181,15 +200,19 @@ static __always_inline unsigned long
12948 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12949 unsigned long n)
12950 {
12951- return __copy_from_user_ll_nocache_nozero(to, from, n);
12952+ if ((long)n < 0)
12953+ return n;
12954+
12955+ return __copy_from_user_ll_nocache_nozero(to, from, n);
12956 }
12957
12958-unsigned long __must_check copy_to_user(void __user *to,
12959- const void *from, unsigned long n);
12960-unsigned long __must_check _copy_from_user(void *to,
12961- const void __user *from,
12962- unsigned long n);
12963-
12964+extern void copy_to_user_overflow(void)
12965+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12966+ __compiletime_error("copy_to_user() buffer size is not provably correct")
12967+#else
12968+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
12969+#endif
12970+;
12971
12972 extern void copy_from_user_overflow(void)
12973 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12974@@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
12975 #endif
12976 ;
12977
12978-static inline unsigned long __must_check copy_from_user(void *to,
12979- const void __user *from,
12980- unsigned long n)
12981+/**
12982+ * copy_to_user: - Copy a block of data into user space.
12983+ * @to: Destination address, in user space.
12984+ * @from: Source address, in kernel space.
12985+ * @n: Number of bytes to copy.
12986+ *
12987+ * Context: User context only. This function may sleep.
12988+ *
12989+ * Copy data from kernel space to user space.
12990+ *
12991+ * Returns number of bytes that could not be copied.
12992+ * On success, this will be zero.
12993+ */
12994+static inline unsigned long __must_check
12995+copy_to_user(void __user *to, const void *from, unsigned long n)
12996 {
12997- int sz = __compiletime_object_size(to);
12998+ size_t sz = __compiletime_object_size(from);
12999
13000- if (likely(sz == -1 || sz >= n))
13001- n = _copy_from_user(to, from, n);
13002- else
13003+ if (unlikely(sz != (size_t)-1 && sz < n))
13004+ copy_to_user_overflow();
13005+ else if (access_ok(VERIFY_WRITE, to, n))
13006+ n = __copy_to_user(to, from, n);
13007+ return n;
13008+}
13009+
13010+/**
13011+ * copy_from_user: - Copy a block of data from user space.
13012+ * @to: Destination address, in kernel space.
13013+ * @from: Source address, in user space.
13014+ * @n: Number of bytes to copy.
13015+ *
13016+ * Context: User context only. This function may sleep.
13017+ *
13018+ * Copy data from user space to kernel space.
13019+ *
13020+ * Returns number of bytes that could not be copied.
13021+ * On success, this will be zero.
13022+ *
13023+ * If some data could not be copied, this function will pad the copied
13024+ * data to the requested size using zero bytes.
13025+ */
13026+static inline unsigned long __must_check
13027+copy_from_user(void *to, const void __user *from, unsigned long n)
13028+{
13029+ size_t sz = __compiletime_object_size(to);
13030+
13031+ if (unlikely(sz != (size_t)-1 && sz < n))
13032 copy_from_user_overflow();
13033-
13034+ else if (access_ok(VERIFY_READ, from, n))
13035+ n = __copy_from_user(to, from, n);
13036+ else if ((long)n > 0) {
13037+ if (!__builtin_constant_p(n))
13038+ check_object_size(to, n, false);
13039+ memset(to, 0, n);
13040+ }
13041 return n;
13042 }
13043
13044@@ -230,7 +297,7 @@ static inline unsigned long __must_check copy_from_user(void *to,
13045 #define strlen_user(str) strnlen_user(str, LONG_MAX)
13046
13047 long strnlen_user(const char __user *str, long n);
13048-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
13049-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
13050+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13051+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13052
13053 #endif /* _ASM_X86_UACCESS_32_H */
13054diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
13055index fcd4b6f..ef04f8f 100644
13056--- a/arch/x86/include/asm/uaccess_64.h
13057+++ b/arch/x86/include/asm/uaccess_64.h
13058@@ -10,6 +10,9 @@
13059 #include <asm/alternative.h>
13060 #include <asm/cpufeature.h>
13061 #include <asm/page.h>
13062+#include <asm/pgtable.h>
13063+
13064+#define set_fs(x) (current_thread_info()->addr_limit = (x))
13065
13066 /*
13067 * Copy To/From Userspace
13068@@ -17,12 +20,14 @@
13069
13070 /* Handles exceptions in both to and from, but doesn't do access_ok */
13071 __must_check unsigned long
13072-copy_user_generic_string(void *to, const void *from, unsigned len);
13073+copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3);
13074 __must_check unsigned long
13075-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
13076+copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3);
13077
13078 static __always_inline __must_check unsigned long
13079-copy_user_generic(void *to, const void *from, unsigned len)
13080+copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3);
13081+static __always_inline __must_check unsigned long
13082+copy_user_generic(void *to, const void *from, unsigned long len)
13083 {
13084 unsigned ret;
13085
13086@@ -32,142 +37,238 @@ copy_user_generic(void *to, const void *from, unsigned len)
13087 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
13088 "=d" (len)),
13089 "1" (to), "2" (from), "3" (len)
13090- : "memory", "rcx", "r8", "r9", "r10", "r11");
13091+ : "memory", "rcx", "r8", "r9", "r11");
13092 return ret;
13093 }
13094
13095+static __always_inline __must_check unsigned long
13096+__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
13097+static __always_inline __must_check unsigned long
13098+__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
13099 __must_check unsigned long
13100-_copy_to_user(void __user *to, const void *from, unsigned len);
13101-__must_check unsigned long
13102-_copy_from_user(void *to, const void __user *from, unsigned len);
13103-__must_check unsigned long
13104-copy_in_user(void __user *to, const void __user *from, unsigned len);
13105+copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
13106+
13107+extern void copy_to_user_overflow(void)
13108+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13109+ __compiletime_error("copy_to_user() buffer size is not provably correct")
13110+#else
13111+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
13112+#endif
13113+;
13114+
13115+extern void copy_from_user_overflow(void)
13116+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13117+ __compiletime_error("copy_from_user() buffer size is not provably correct")
13118+#else
13119+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
13120+#endif
13121+;
13122
13123 static inline unsigned long __must_check copy_from_user(void *to,
13124 const void __user *from,
13125 unsigned long n)
13126 {
13127- int sz = __compiletime_object_size(to);
13128-
13129 might_fault();
13130- if (likely(sz == -1 || sz >= n))
13131- n = _copy_from_user(to, from, n);
13132-#ifdef CONFIG_DEBUG_VM
13133- else
13134- WARN(1, "Buffer overflow detected!\n");
13135-#endif
13136+
13137+ if (access_ok(VERIFY_READ, from, n))
13138+ n = __copy_from_user(to, from, n);
13139+ else if (n < INT_MAX) {
13140+ if (!__builtin_constant_p(n))
13141+ check_object_size(to, n, false);
13142+ memset(to, 0, n);
13143+ }
13144 return n;
13145 }
13146
13147 static __always_inline __must_check
13148-int copy_to_user(void __user *dst, const void *src, unsigned size)
13149+int copy_to_user(void __user *dst, const void *src, unsigned long size)
13150 {
13151 might_fault();
13152
13153- return _copy_to_user(dst, src, size);
13154+ if (access_ok(VERIFY_WRITE, dst, size))
13155+ size = __copy_to_user(dst, src, size);
13156+ return size;
13157 }
13158
13159 static __always_inline __must_check
13160-int __copy_from_user(void *dst, const void __user *src, unsigned size)
13161+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
13162 {
13163- int ret = 0;
13164+ size_t sz = __compiletime_object_size(dst);
13165+ unsigned ret = 0;
13166
13167 might_fault();
13168- if (!__builtin_constant_p(size))
13169- return copy_user_generic(dst, (__force void *)src, size);
13170+
13171+ if (size > INT_MAX)
13172+ return size;
13173+
13174+#ifdef CONFIG_PAX_MEMORY_UDEREF
13175+ if (!__access_ok(VERIFY_READ, src, size))
13176+ return size;
13177+#endif
13178+
13179+ if (unlikely(sz != (size_t)-1 && sz < size)) {
13180+ copy_from_user_overflow();
13181+ return size;
13182+ }
13183+
13184+ if (!__builtin_constant_p(size)) {
13185+ check_object_size(dst, size, false);
13186+
13187+#ifdef CONFIG_PAX_MEMORY_UDEREF
13188+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13189+ src += PAX_USER_SHADOW_BASE;
13190+#endif
13191+
13192+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13193+ }
13194 switch (size) {
13195- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
13196+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
13197 ret, "b", "b", "=q", 1);
13198 return ret;
13199- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
13200+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
13201 ret, "w", "w", "=r", 2);
13202 return ret;
13203- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
13204+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
13205 ret, "l", "k", "=r", 4);
13206 return ret;
13207- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
13208+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13209 ret, "q", "", "=r", 8);
13210 return ret;
13211 case 10:
13212- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13213+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13214 ret, "q", "", "=r", 10);
13215 if (unlikely(ret))
13216 return ret;
13217 __get_user_asm(*(u16 *)(8 + (char *)dst),
13218- (u16 __user *)(8 + (char __user *)src),
13219+ (const u16 __user *)(8 + (const char __user *)src),
13220 ret, "w", "w", "=r", 2);
13221 return ret;
13222 case 16:
13223- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13224+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13225 ret, "q", "", "=r", 16);
13226 if (unlikely(ret))
13227 return ret;
13228 __get_user_asm(*(u64 *)(8 + (char *)dst),
13229- (u64 __user *)(8 + (char __user *)src),
13230+ (const u64 __user *)(8 + (const char __user *)src),
13231 ret, "q", "", "=r", 8);
13232 return ret;
13233 default:
13234- return copy_user_generic(dst, (__force void *)src, size);
13235+
13236+#ifdef CONFIG_PAX_MEMORY_UDEREF
13237+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13238+ src += PAX_USER_SHADOW_BASE;
13239+#endif
13240+
13241+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13242 }
13243 }
13244
13245 static __always_inline __must_check
13246-int __copy_to_user(void __user *dst, const void *src, unsigned size)
13247+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
13248 {
13249- int ret = 0;
13250+ size_t sz = __compiletime_object_size(src);
13251+ unsigned ret = 0;
13252
13253 might_fault();
13254- if (!__builtin_constant_p(size))
13255- return copy_user_generic((__force void *)dst, src, size);
13256+
13257+ if (size > INT_MAX)
13258+ return size;
13259+
13260+#ifdef CONFIG_PAX_MEMORY_UDEREF
13261+ if (!__access_ok(VERIFY_WRITE, dst, size))
13262+ return size;
13263+#endif
13264+
13265+ if (unlikely(sz != (size_t)-1 && sz < size)) {
13266+ copy_to_user_overflow();
13267+ return size;
13268+ }
13269+
13270+ if (!__builtin_constant_p(size)) {
13271+ check_object_size(src, size, true);
13272+
13273+#ifdef CONFIG_PAX_MEMORY_UDEREF
13274+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13275+ dst += PAX_USER_SHADOW_BASE;
13276+#endif
13277+
13278+ return copy_user_generic((__force_kernel void *)dst, src, size);
13279+ }
13280 switch (size) {
13281- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
13282+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
13283 ret, "b", "b", "iq", 1);
13284 return ret;
13285- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
13286+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
13287 ret, "w", "w", "ir", 2);
13288 return ret;
13289- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
13290+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
13291 ret, "l", "k", "ir", 4);
13292 return ret;
13293- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
13294+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13295 ret, "q", "", "er", 8);
13296 return ret;
13297 case 10:
13298- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13299+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13300 ret, "q", "", "er", 10);
13301 if (unlikely(ret))
13302 return ret;
13303 asm("":::"memory");
13304- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
13305+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
13306 ret, "w", "w", "ir", 2);
13307 return ret;
13308 case 16:
13309- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13310+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13311 ret, "q", "", "er", 16);
13312 if (unlikely(ret))
13313 return ret;
13314 asm("":::"memory");
13315- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
13316+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
13317 ret, "q", "", "er", 8);
13318 return ret;
13319 default:
13320- return copy_user_generic((__force void *)dst, src, size);
13321+
13322+#ifdef CONFIG_PAX_MEMORY_UDEREF
13323+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13324+ dst += PAX_USER_SHADOW_BASE;
13325+#endif
13326+
13327+ return copy_user_generic((__force_kernel void *)dst, src, size);
13328 }
13329 }
13330
13331 static __always_inline __must_check
13332-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13333+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
13334 {
13335- int ret = 0;
13336+ unsigned ret = 0;
13337
13338 might_fault();
13339- if (!__builtin_constant_p(size))
13340- return copy_user_generic((__force void *)dst,
13341- (__force void *)src, size);
13342+
13343+ if (size > INT_MAX)
13344+ return size;
13345+
13346+#ifdef CONFIG_PAX_MEMORY_UDEREF
13347+ if (!__access_ok(VERIFY_READ, src, size))
13348+ return size;
13349+ if (!__access_ok(VERIFY_WRITE, dst, size))
13350+ return size;
13351+#endif
13352+
13353+ if (!__builtin_constant_p(size)) {
13354+
13355+#ifdef CONFIG_PAX_MEMORY_UDEREF
13356+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13357+ src += PAX_USER_SHADOW_BASE;
13358+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13359+ dst += PAX_USER_SHADOW_BASE;
13360+#endif
13361+
13362+ return copy_user_generic((__force_kernel void *)dst,
13363+ (__force_kernel const void *)src, size);
13364+ }
13365 switch (size) {
13366 case 1: {
13367 u8 tmp;
13368- __get_user_asm(tmp, (u8 __user *)src,
13369+ __get_user_asm(tmp, (const u8 __user *)src,
13370 ret, "b", "b", "=q", 1);
13371 if (likely(!ret))
13372 __put_user_asm(tmp, (u8 __user *)dst,
13373@@ -176,7 +277,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13374 }
13375 case 2: {
13376 u16 tmp;
13377- __get_user_asm(tmp, (u16 __user *)src,
13378+ __get_user_asm(tmp, (const u16 __user *)src,
13379 ret, "w", "w", "=r", 2);
13380 if (likely(!ret))
13381 __put_user_asm(tmp, (u16 __user *)dst,
13382@@ -186,7 +287,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13383
13384 case 4: {
13385 u32 tmp;
13386- __get_user_asm(tmp, (u32 __user *)src,
13387+ __get_user_asm(tmp, (const u32 __user *)src,
13388 ret, "l", "k", "=r", 4);
13389 if (likely(!ret))
13390 __put_user_asm(tmp, (u32 __user *)dst,
13391@@ -195,7 +296,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13392 }
13393 case 8: {
13394 u64 tmp;
13395- __get_user_asm(tmp, (u64 __user *)src,
13396+ __get_user_asm(tmp, (const u64 __user *)src,
13397 ret, "q", "", "=r", 8);
13398 if (likely(!ret))
13399 __put_user_asm(tmp, (u64 __user *)dst,
13400@@ -203,47 +304,92 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13401 return ret;
13402 }
13403 default:
13404- return copy_user_generic((__force void *)dst,
13405- (__force void *)src, size);
13406+
13407+#ifdef CONFIG_PAX_MEMORY_UDEREF
13408+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13409+ src += PAX_USER_SHADOW_BASE;
13410+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13411+ dst += PAX_USER_SHADOW_BASE;
13412+#endif
13413+
13414+ return copy_user_generic((__force_kernel void *)dst,
13415+ (__force_kernel const void *)src, size);
13416 }
13417 }
13418
13419 __must_check long strnlen_user(const char __user *str, long n);
13420 __must_check long __strnlen_user(const char __user *str, long n);
13421 __must_check long strlen_user(const char __user *str);
13422-__must_check unsigned long clear_user(void __user *mem, unsigned long len);
13423-__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13424+__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13425+__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13426
13427 static __must_check __always_inline int
13428-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
13429+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
13430 {
13431- return copy_user_generic(dst, (__force const void *)src, size);
13432+ if (size > INT_MAX)
13433+ return size;
13434+
13435+#ifdef CONFIG_PAX_MEMORY_UDEREF
13436+ if (!__access_ok(VERIFY_READ, src, size))
13437+ return size;
13438+
13439+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13440+ src += PAX_USER_SHADOW_BASE;
13441+#endif
13442+
13443+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13444 }
13445
13446-static __must_check __always_inline int
13447-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
13448+static __must_check __always_inline unsigned long
13449+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
13450 {
13451- return copy_user_generic((__force void *)dst, src, size);
13452+ if (size > INT_MAX)
13453+ return size;
13454+
13455+#ifdef CONFIG_PAX_MEMORY_UDEREF
13456+ if (!__access_ok(VERIFY_WRITE, dst, size))
13457+ return size;
13458+
13459+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13460+ dst += PAX_USER_SHADOW_BASE;
13461+#endif
13462+
13463+ return copy_user_generic((__force_kernel void *)dst, src, size);
13464 }
13465
13466-extern long __copy_user_nocache(void *dst, const void __user *src,
13467- unsigned size, int zerorest);
13468+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
13469+ unsigned long size, int zerorest) __size_overflow(3);
13470
13471-static inline int
13472-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
13473+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13474 {
13475 might_sleep();
13476+
13477+ if (size > INT_MAX)
13478+ return size;
13479+
13480+#ifdef CONFIG_PAX_MEMORY_UDEREF
13481+ if (!__access_ok(VERIFY_READ, src, size))
13482+ return size;
13483+#endif
13484+
13485 return __copy_user_nocache(dst, src, size, 1);
13486 }
13487
13488-static inline int
13489-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13490- unsigned size)
13491+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13492+ unsigned long size)
13493 {
13494+ if (size > INT_MAX)
13495+ return size;
13496+
13497+#ifdef CONFIG_PAX_MEMORY_UDEREF
13498+ if (!__access_ok(VERIFY_READ, src, size))
13499+ return size;
13500+#endif
13501+
13502 return __copy_user_nocache(dst, src, size, 0);
13503 }
13504
13505-unsigned long
13506-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
13507+extern unsigned long
13508+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
13509
13510 #endif /* _ASM_X86_UACCESS_64_H */
13511diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13512index bb05228..d763d5b 100644
13513--- a/arch/x86/include/asm/vdso.h
13514+++ b/arch/x86/include/asm/vdso.h
13515@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
13516 #define VDSO32_SYMBOL(base, name) \
13517 ({ \
13518 extern const char VDSO32_##name[]; \
13519- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13520+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13521 })
13522 #endif
13523
13524diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13525index 764b66a..ad3cfc8 100644
13526--- a/arch/x86/include/asm/x86_init.h
13527+++ b/arch/x86/include/asm/x86_init.h
13528@@ -29,7 +29,7 @@ struct x86_init_mpparse {
13529 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13530 void (*find_smp_config)(void);
13531 void (*get_smp_config)(unsigned int early);
13532-};
13533+} __no_const;
13534
13535 /**
13536 * struct x86_init_resources - platform specific resource related ops
13537@@ -43,7 +43,7 @@ struct x86_init_resources {
13538 void (*probe_roms)(void);
13539 void (*reserve_resources)(void);
13540 char *(*memory_setup)(void);
13541-};
13542+} __no_const;
13543
13544 /**
13545 * struct x86_init_irqs - platform specific interrupt setup
13546@@ -56,7 +56,7 @@ struct x86_init_irqs {
13547 void (*pre_vector_init)(void);
13548 void (*intr_init)(void);
13549 void (*trap_init)(void);
13550-};
13551+} __no_const;
13552
13553 /**
13554 * struct x86_init_oem - oem platform specific customizing functions
13555@@ -66,7 +66,7 @@ struct x86_init_irqs {
13556 struct x86_init_oem {
13557 void (*arch_setup)(void);
13558 void (*banner)(void);
13559-};
13560+} __no_const;
13561
13562 /**
13563 * struct x86_init_mapping - platform specific initial kernel pagetable setup
13564@@ -77,7 +77,7 @@ struct x86_init_oem {
13565 */
13566 struct x86_init_mapping {
13567 void (*pagetable_reserve)(u64 start, u64 end);
13568-};
13569+} __no_const;
13570
13571 /**
13572 * struct x86_init_paging - platform specific paging functions
13573@@ -87,7 +87,7 @@ struct x86_init_mapping {
13574 struct x86_init_paging {
13575 void (*pagetable_setup_start)(pgd_t *base);
13576 void (*pagetable_setup_done)(pgd_t *base);
13577-};
13578+} __no_const;
13579
13580 /**
13581 * struct x86_init_timers - platform specific timer setup
13582@@ -102,7 +102,7 @@ struct x86_init_timers {
13583 void (*tsc_pre_init)(void);
13584 void (*timer_init)(void);
13585 void (*wallclock_init)(void);
13586-};
13587+} __no_const;
13588
13589 /**
13590 * struct x86_init_iommu - platform specific iommu setup
13591@@ -110,7 +110,7 @@ struct x86_init_timers {
13592 */
13593 struct x86_init_iommu {
13594 int (*iommu_init)(void);
13595-};
13596+} __no_const;
13597
13598 /**
13599 * struct x86_init_pci - platform specific pci init functions
13600@@ -124,7 +124,7 @@ struct x86_init_pci {
13601 int (*init)(void);
13602 void (*init_irq)(void);
13603 void (*fixup_irqs)(void);
13604-};
13605+} __no_const;
13606
13607 /**
13608 * struct x86_init_ops - functions for platform specific setup
13609@@ -140,7 +140,7 @@ struct x86_init_ops {
13610 struct x86_init_timers timers;
13611 struct x86_init_iommu iommu;
13612 struct x86_init_pci pci;
13613-};
13614+} __no_const;
13615
13616 /**
13617 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13618@@ -151,7 +151,7 @@ struct x86_cpuinit_ops {
13619 void (*setup_percpu_clockev)(void);
13620 void (*early_percpu_clock_init)(void);
13621 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
13622-};
13623+} __no_const;
13624
13625 /**
13626 * struct x86_platform_ops - platform specific runtime functions
13627@@ -177,7 +177,7 @@ struct x86_platform_ops {
13628 int (*i8042_detect)(void);
13629 void (*save_sched_clock_state)(void);
13630 void (*restore_sched_clock_state)(void);
13631-};
13632+} __no_const;
13633
13634 struct pci_dev;
13635
13636@@ -186,7 +186,7 @@ struct x86_msi_ops {
13637 void (*teardown_msi_irq)(unsigned int irq);
13638 void (*teardown_msi_irqs)(struct pci_dev *dev);
13639 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
13640-};
13641+} __no_const;
13642
13643 extern struct x86_init_ops x86_init;
13644 extern struct x86_cpuinit_ops x86_cpuinit;
13645diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13646index c6ce245..ffbdab7 100644
13647--- a/arch/x86/include/asm/xsave.h
13648+++ b/arch/x86/include/asm/xsave.h
13649@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13650 {
13651 int err;
13652
13653+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13654+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13655+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13656+#endif
13657+
13658 /*
13659 * Clear the xsave header first, so that reserved fields are
13660 * initialized to zero.
13661@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13662 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13663 {
13664 int err;
13665- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13666+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13667 u32 lmask = mask;
13668 u32 hmask = mask >> 32;
13669
13670+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13671+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13672+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13673+#endif
13674+
13675 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13676 "2:\n"
13677 ".section .fixup,\"ax\"\n"
13678diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13679index 6a564ac..9b1340c 100644
13680--- a/arch/x86/kernel/acpi/realmode/Makefile
13681+++ b/arch/x86/kernel/acpi/realmode/Makefile
13682@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13683 $(call cc-option, -fno-stack-protector) \
13684 $(call cc-option, -mpreferred-stack-boundary=2)
13685 KBUILD_CFLAGS += $(call cc-option, -m32)
13686+ifdef CONSTIFY_PLUGIN
13687+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13688+endif
13689 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13690 GCOV_PROFILE := n
13691
13692diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13693index b4fd836..4358fe3 100644
13694--- a/arch/x86/kernel/acpi/realmode/wakeup.S
13695+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13696@@ -108,6 +108,9 @@ wakeup_code:
13697 /* Do any other stuff... */
13698
13699 #ifndef CONFIG_64BIT
13700+ /* Recheck NX bit overrides (64bit path does this in trampoline */
13701+ call verify_cpu
13702+
13703 /* This could also be done in C code... */
13704 movl pmode_cr3, %eax
13705 movl %eax, %cr3
13706@@ -131,6 +134,7 @@ wakeup_code:
13707 movl pmode_cr0, %eax
13708 movl %eax, %cr0
13709 jmp pmode_return
13710+# include "../../verify_cpu.S"
13711 #else
13712 pushw $0
13713 pushw trampoline_segment
13714diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13715index 146a49c..1b5338b 100644
13716--- a/arch/x86/kernel/acpi/sleep.c
13717+++ b/arch/x86/kernel/acpi/sleep.c
13718@@ -98,8 +98,12 @@ int acpi_suspend_lowlevel(void)
13719 header->trampoline_segment = trampoline_address() >> 4;
13720 #ifdef CONFIG_SMP
13721 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13722+
13723+ pax_open_kernel();
13724 early_gdt_descr.address =
13725 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13726+ pax_close_kernel();
13727+
13728 initial_gs = per_cpu_offset(smp_processor_id());
13729 #endif
13730 initial_code = (unsigned long)wakeup_long64;
13731diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13732index 7261083..5c12053 100644
13733--- a/arch/x86/kernel/acpi/wakeup_32.S
13734+++ b/arch/x86/kernel/acpi/wakeup_32.S
13735@@ -30,13 +30,11 @@ wakeup_pmode_return:
13736 # and restore the stack ... but you need gdt for this to work
13737 movl saved_context_esp, %esp
13738
13739- movl %cs:saved_magic, %eax
13740- cmpl $0x12345678, %eax
13741+ cmpl $0x12345678, saved_magic
13742 jne bogus_magic
13743
13744 # jump to place where we left off
13745- movl saved_eip, %eax
13746- jmp *%eax
13747+ jmp *(saved_eip)
13748
13749 bogus_magic:
13750 jmp bogus_magic
13751diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13752index 1f84794..e23f862 100644
13753--- a/arch/x86/kernel/alternative.c
13754+++ b/arch/x86/kernel/alternative.c
13755@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
13756 */
13757 for (a = start; a < end; a++) {
13758 instr = (u8 *)&a->instr_offset + a->instr_offset;
13759+
13760+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13761+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13762+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13763+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13764+#endif
13765+
13766 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13767 BUG_ON(a->replacementlen > a->instrlen);
13768 BUG_ON(a->instrlen > sizeof(insnbuf));
13769@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
13770 for (poff = start; poff < end; poff++) {
13771 u8 *ptr = (u8 *)poff + *poff;
13772
13773+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13774+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13775+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13776+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13777+#endif
13778+
13779 if (!*poff || ptr < text || ptr >= text_end)
13780 continue;
13781 /* turn DS segment override prefix into lock prefix */
13782- if (*ptr == 0x3e)
13783+ if (*ktla_ktva(ptr) == 0x3e)
13784 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13785 };
13786 mutex_unlock(&text_mutex);
13787@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
13788 for (poff = start; poff < end; poff++) {
13789 u8 *ptr = (u8 *)poff + *poff;
13790
13791+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13792+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13793+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13794+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13795+#endif
13796+
13797 if (!*poff || ptr < text || ptr >= text_end)
13798 continue;
13799 /* turn lock prefix into DS segment override prefix */
13800- if (*ptr == 0xf0)
13801+ if (*ktla_ktva(ptr) == 0xf0)
13802 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13803 };
13804 mutex_unlock(&text_mutex);
13805@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13806
13807 BUG_ON(p->len > MAX_PATCH_LEN);
13808 /* prep the buffer with the original instructions */
13809- memcpy(insnbuf, p->instr, p->len);
13810+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13811 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13812 (unsigned long)p->instr, p->len);
13813
13814@@ -568,7 +587,7 @@ void __init alternative_instructions(void)
13815 if (smp_alt_once)
13816 free_init_pages("SMP alternatives",
13817 (unsigned long)__smp_locks,
13818- (unsigned long)__smp_locks_end);
13819+ PAGE_ALIGN((unsigned long)__smp_locks_end));
13820
13821 restart_nmi();
13822 }
13823@@ -585,13 +604,17 @@ void __init alternative_instructions(void)
13824 * instructions. And on the local CPU you need to be protected again NMI or MCE
13825 * handlers seeing an inconsistent instruction while you patch.
13826 */
13827-void *__init_or_module text_poke_early(void *addr, const void *opcode,
13828+void *__kprobes text_poke_early(void *addr, const void *opcode,
13829 size_t len)
13830 {
13831 unsigned long flags;
13832 local_irq_save(flags);
13833- memcpy(addr, opcode, len);
13834+
13835+ pax_open_kernel();
13836+ memcpy(ktla_ktva(addr), opcode, len);
13837 sync_core();
13838+ pax_close_kernel();
13839+
13840 local_irq_restore(flags);
13841 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13842 that causes hangs on some VIA CPUs. */
13843@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
13844 */
13845 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13846 {
13847- unsigned long flags;
13848- char *vaddr;
13849+ unsigned char *vaddr = ktla_ktva(addr);
13850 struct page *pages[2];
13851- int i;
13852+ size_t i;
13853
13854 if (!core_kernel_text((unsigned long)addr)) {
13855- pages[0] = vmalloc_to_page(addr);
13856- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13857+ pages[0] = vmalloc_to_page(vaddr);
13858+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13859 } else {
13860- pages[0] = virt_to_page(addr);
13861+ pages[0] = virt_to_page(vaddr);
13862 WARN_ON(!PageReserved(pages[0]));
13863- pages[1] = virt_to_page(addr + PAGE_SIZE);
13864+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13865 }
13866 BUG_ON(!pages[0]);
13867- local_irq_save(flags);
13868- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13869- if (pages[1])
13870- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13871- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13872- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13873- clear_fixmap(FIX_TEXT_POKE0);
13874- if (pages[1])
13875- clear_fixmap(FIX_TEXT_POKE1);
13876- local_flush_tlb();
13877- sync_core();
13878- /* Could also do a CLFLUSH here to speed up CPU recovery; but
13879- that causes hangs on some VIA CPUs. */
13880+ text_poke_early(addr, opcode, len);
13881 for (i = 0; i < len; i++)
13882- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13883- local_irq_restore(flags);
13884+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13885 return addr;
13886 }
13887
13888diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13889index edc2448..553e7c5 100644
13890--- a/arch/x86/kernel/apic/apic.c
13891+++ b/arch/x86/kernel/apic/apic.c
13892@@ -184,7 +184,7 @@ int first_system_vector = 0xfe;
13893 /*
13894 * Debug level, exported for io_apic.c
13895 */
13896-unsigned int apic_verbosity;
13897+int apic_verbosity;
13898
13899 int pic_mode;
13900
13901@@ -1917,7 +1917,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13902 apic_write(APIC_ESR, 0);
13903 v1 = apic_read(APIC_ESR);
13904 ack_APIC_irq();
13905- atomic_inc(&irq_err_count);
13906+ atomic_inc_unchecked(&irq_err_count);
13907
13908 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
13909 smp_processor_id(), v0 , v1);
13910diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13911index e88300d..cd5a87a 100644
13912--- a/arch/x86/kernel/apic/io_apic.c
13913+++ b/arch/x86/kernel/apic/io_apic.c
13914@@ -83,7 +83,9 @@ static struct io_apic_ops io_apic_ops = {
13915
13916 void __init set_io_apic_ops(const struct io_apic_ops *ops)
13917 {
13918- io_apic_ops = *ops;
13919+ pax_open_kernel();
13920+ memcpy((void*)&io_apic_ops, ops, sizeof io_apic_ops);
13921+ pax_close_kernel();
13922 }
13923
13924 /*
13925@@ -1135,7 +1137,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13926 }
13927 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13928
13929-void lock_vector_lock(void)
13930+void lock_vector_lock(void) __acquires(vector_lock)
13931 {
13932 /* Used to the online set of cpus does not change
13933 * during assign_irq_vector.
13934@@ -1143,7 +1145,7 @@ void lock_vector_lock(void)
13935 raw_spin_lock(&vector_lock);
13936 }
13937
13938-void unlock_vector_lock(void)
13939+void unlock_vector_lock(void) __releases(vector_lock)
13940 {
13941 raw_spin_unlock(&vector_lock);
13942 }
13943@@ -2549,7 +2551,7 @@ static void ack_apic_edge(struct irq_data *data)
13944 ack_APIC_irq();
13945 }
13946
13947-atomic_t irq_mis_count;
13948+atomic_unchecked_t irq_mis_count;
13949
13950 #ifdef CONFIG_GENERIC_PENDING_IRQ
13951 static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
13952@@ -2667,7 +2669,7 @@ static void ack_apic_level(struct irq_data *data)
13953 * at the cpu.
13954 */
13955 if (!(v & (1 << (i & 0x1f)))) {
13956- atomic_inc(&irq_mis_count);
13957+ atomic_inc_unchecked(&irq_mis_count);
13958
13959 eoi_ioapic_irq(irq, cfg);
13960 }
13961diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13962index 459e78c..f037006 100644
13963--- a/arch/x86/kernel/apm_32.c
13964+++ b/arch/x86/kernel/apm_32.c
13965@@ -410,7 +410,7 @@ static DEFINE_MUTEX(apm_mutex);
13966 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13967 * even though they are called in protected mode.
13968 */
13969-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13970+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13971 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13972
13973 static const char driver_version[] = "1.16ac"; /* no spaces */
13974@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13975 BUG_ON(cpu != 0);
13976 gdt = get_cpu_gdt_table(cpu);
13977 save_desc_40 = gdt[0x40 / 8];
13978+
13979+ pax_open_kernel();
13980 gdt[0x40 / 8] = bad_bios_desc;
13981+ pax_close_kernel();
13982
13983 apm_irq_save(flags);
13984 APM_DO_SAVE_SEGS;
13985@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13986 &call->esi);
13987 APM_DO_RESTORE_SEGS;
13988 apm_irq_restore(flags);
13989+
13990+ pax_open_kernel();
13991 gdt[0x40 / 8] = save_desc_40;
13992+ pax_close_kernel();
13993+
13994 put_cpu();
13995
13996 return call->eax & 0xff;
13997@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
13998 BUG_ON(cpu != 0);
13999 gdt = get_cpu_gdt_table(cpu);
14000 save_desc_40 = gdt[0x40 / 8];
14001+
14002+ pax_open_kernel();
14003 gdt[0x40 / 8] = bad_bios_desc;
14004+ pax_close_kernel();
14005
14006 apm_irq_save(flags);
14007 APM_DO_SAVE_SEGS;
14008@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
14009 &call->eax);
14010 APM_DO_RESTORE_SEGS;
14011 apm_irq_restore(flags);
14012+
14013+ pax_open_kernel();
14014 gdt[0x40 / 8] = save_desc_40;
14015+ pax_close_kernel();
14016+
14017 put_cpu();
14018 return error;
14019 }
14020@@ -2345,12 +2359,15 @@ static int __init apm_init(void)
14021 * code to that CPU.
14022 */
14023 gdt = get_cpu_gdt_table(0);
14024+
14025+ pax_open_kernel();
14026 set_desc_base(&gdt[APM_CS >> 3],
14027 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
14028 set_desc_base(&gdt[APM_CS_16 >> 3],
14029 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
14030 set_desc_base(&gdt[APM_DS >> 3],
14031 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
14032+ pax_close_kernel();
14033
14034 proc_create("apm", 0, NULL, &apm_file_ops);
14035
14036diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
14037index 68de2dc..1f3c720 100644
14038--- a/arch/x86/kernel/asm-offsets.c
14039+++ b/arch/x86/kernel/asm-offsets.c
14040@@ -33,6 +33,8 @@ void common(void) {
14041 OFFSET(TI_status, thread_info, status);
14042 OFFSET(TI_addr_limit, thread_info, addr_limit);
14043 OFFSET(TI_preempt_count, thread_info, preempt_count);
14044+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
14045+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14046
14047 BLANK();
14048 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
14049@@ -53,8 +55,26 @@ void common(void) {
14050 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14051 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14052 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
14053+
14054+#ifdef CONFIG_PAX_KERNEXEC
14055+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14056 #endif
14057
14058+#ifdef CONFIG_PAX_MEMORY_UDEREF
14059+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
14060+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
14061+#ifdef CONFIG_X86_64
14062+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
14063+#endif
14064+#endif
14065+
14066+#endif
14067+
14068+ BLANK();
14069+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14070+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
14071+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14072+
14073 #ifdef CONFIG_XEN
14074 BLANK();
14075 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
14076diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
14077index 1b4754f..fbb4227 100644
14078--- a/arch/x86/kernel/asm-offsets_64.c
14079+++ b/arch/x86/kernel/asm-offsets_64.c
14080@@ -76,6 +76,7 @@ int main(void)
14081 BLANK();
14082 #undef ENTRY
14083
14084+ DEFINE(TSS_size, sizeof(struct tss_struct));
14085 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
14086 BLANK();
14087
14088diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
14089index 6ab6aa2..8f71507 100644
14090--- a/arch/x86/kernel/cpu/Makefile
14091+++ b/arch/x86/kernel/cpu/Makefile
14092@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
14093 CFLAGS_REMOVE_perf_event.o = -pg
14094 endif
14095
14096-# Make sure load_percpu_segment has no stackprotector
14097-nostackp := $(call cc-option, -fno-stack-protector)
14098-CFLAGS_common.o := $(nostackp)
14099-
14100 obj-y := intel_cacheinfo.o scattered.o topology.o
14101 obj-y += proc.o capflags.o powerflags.o common.o
14102 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
14103diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
14104index 146bb62..ac9c74a 100644
14105--- a/arch/x86/kernel/cpu/amd.c
14106+++ b/arch/x86/kernel/cpu/amd.c
14107@@ -691,7 +691,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
14108 unsigned int size)
14109 {
14110 /* AMD errata T13 (order #21922) */
14111- if ((c->x86 == 6)) {
14112+ if (c->x86 == 6) {
14113 /* Duron Rev A0 */
14114 if (c->x86_model == 3 && c->x86_mask == 0)
14115 size = 64;
14116diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
14117index cf79302..b1b28ae 100644
14118--- a/arch/x86/kernel/cpu/common.c
14119+++ b/arch/x86/kernel/cpu/common.c
14120@@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
14121
14122 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
14123
14124-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
14125-#ifdef CONFIG_X86_64
14126- /*
14127- * We need valid kernel segments for data and code in long mode too
14128- * IRET will check the segment types kkeil 2000/10/28
14129- * Also sysret mandates a special GDT layout
14130- *
14131- * TLS descriptors are currently at a different place compared to i386.
14132- * Hopefully nobody expects them at a fixed place (Wine?)
14133- */
14134- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
14135- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
14136- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
14137- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
14138- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
14139- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
14140-#else
14141- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
14142- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14143- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
14144- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
14145- /*
14146- * Segments used for calling PnP BIOS have byte granularity.
14147- * They code segments and data segments have fixed 64k limits,
14148- * the transfer segment sizes are set at run time.
14149- */
14150- /* 32-bit code */
14151- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14152- /* 16-bit code */
14153- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14154- /* 16-bit data */
14155- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
14156- /* 16-bit data */
14157- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
14158- /* 16-bit data */
14159- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
14160- /*
14161- * The APM segments have byte granularity and their bases
14162- * are set at run time. All have 64k limits.
14163- */
14164- /* 32-bit code */
14165- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14166- /* 16-bit code */
14167- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14168- /* data */
14169- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
14170-
14171- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14172- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14173- GDT_STACK_CANARY_INIT
14174-#endif
14175-} };
14176-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
14177-
14178 static int __init x86_xsave_setup(char *s)
14179 {
14180 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
14181@@ -374,7 +320,7 @@ void switch_to_new_gdt(int cpu)
14182 {
14183 struct desc_ptr gdt_descr;
14184
14185- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
14186+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14187 gdt_descr.size = GDT_SIZE - 1;
14188 load_gdt(&gdt_descr);
14189 /* Reload the per-cpu base */
14190@@ -841,6 +787,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
14191 /* Filter out anything that depends on CPUID levels we don't have */
14192 filter_cpuid_features(c, true);
14193
14194+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14195+ setup_clear_cpu_cap(X86_FEATURE_SEP);
14196+#endif
14197+
14198 /* If the model name is still unset, do table lookup. */
14199 if (!c->x86_model_id[0]) {
14200 const char *p;
14201@@ -1021,10 +971,12 @@ static __init int setup_disablecpuid(char *arg)
14202 }
14203 __setup("clearcpuid=", setup_disablecpuid);
14204
14205+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
14206+EXPORT_PER_CPU_SYMBOL(current_tinfo);
14207+
14208 #ifdef CONFIG_X86_64
14209 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
14210-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
14211- (unsigned long) nmi_idt_table };
14212+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
14213
14214 DEFINE_PER_CPU_FIRST(union irq_stack_union,
14215 irq_stack_union) __aligned(PAGE_SIZE);
14216@@ -1038,7 +990,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
14217 EXPORT_PER_CPU_SYMBOL(current_task);
14218
14219 DEFINE_PER_CPU(unsigned long, kernel_stack) =
14220- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
14221+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
14222 EXPORT_PER_CPU_SYMBOL(kernel_stack);
14223
14224 DEFINE_PER_CPU(char *, irq_stack_ptr) =
14225@@ -1126,7 +1078,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
14226 {
14227 memset(regs, 0, sizeof(struct pt_regs));
14228 regs->fs = __KERNEL_PERCPU;
14229- regs->gs = __KERNEL_STACK_CANARY;
14230+ savesegment(gs, regs->gs);
14231
14232 return regs;
14233 }
14234@@ -1181,7 +1133,7 @@ void __cpuinit cpu_init(void)
14235 int i;
14236
14237 cpu = stack_smp_processor_id();
14238- t = &per_cpu(init_tss, cpu);
14239+ t = init_tss + cpu;
14240 oist = &per_cpu(orig_ist, cpu);
14241
14242 #ifdef CONFIG_NUMA
14243@@ -1207,7 +1159,7 @@ void __cpuinit cpu_init(void)
14244 switch_to_new_gdt(cpu);
14245 loadsegment(fs, 0);
14246
14247- load_idt((const struct desc_ptr *)&idt_descr);
14248+ load_idt(&idt_descr);
14249
14250 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
14251 syscall_init();
14252@@ -1216,7 +1168,6 @@ void __cpuinit cpu_init(void)
14253 wrmsrl(MSR_KERNEL_GS_BASE, 0);
14254 barrier();
14255
14256- x86_configure_nx();
14257 if (cpu != 0)
14258 enable_x2apic();
14259
14260@@ -1272,7 +1223,7 @@ void __cpuinit cpu_init(void)
14261 {
14262 int cpu = smp_processor_id();
14263 struct task_struct *curr = current;
14264- struct tss_struct *t = &per_cpu(init_tss, cpu);
14265+ struct tss_struct *t = init_tss + cpu;
14266 struct thread_struct *thread = &curr->thread;
14267
14268 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
14269diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
14270index 3e6ff6c..54b4992 100644
14271--- a/arch/x86/kernel/cpu/intel.c
14272+++ b/arch/x86/kernel/cpu/intel.c
14273@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
14274 * Update the IDT descriptor and reload the IDT so that
14275 * it uses the read-only mapped virtual address.
14276 */
14277- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14278+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14279 load_idt(&idt_descr);
14280 }
14281 #endif
14282diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
14283index 61604ae..98250a5 100644
14284--- a/arch/x86/kernel/cpu/mcheck/mce.c
14285+++ b/arch/x86/kernel/cpu/mcheck/mce.c
14286@@ -42,6 +42,7 @@
14287 #include <asm/processor.h>
14288 #include <asm/mce.h>
14289 #include <asm/msr.h>
14290+#include <asm/local.h>
14291
14292 #include "mce-internal.h"
14293
14294@@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
14295 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14296 m->cs, m->ip);
14297
14298- if (m->cs == __KERNEL_CS)
14299+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14300 print_symbol("{%s}", m->ip);
14301 pr_cont("\n");
14302 }
14303@@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
14304
14305 #define PANIC_TIMEOUT 5 /* 5 seconds */
14306
14307-static atomic_t mce_paniced;
14308+static atomic_unchecked_t mce_paniced;
14309
14310 static int fake_panic;
14311-static atomic_t mce_fake_paniced;
14312+static atomic_unchecked_t mce_fake_paniced;
14313
14314 /* Panic in progress. Enable interrupts and wait for final IPI */
14315 static void wait_for_panic(void)
14316@@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14317 /*
14318 * Make sure only one CPU runs in machine check panic
14319 */
14320- if (atomic_inc_return(&mce_paniced) > 1)
14321+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14322 wait_for_panic();
14323 barrier();
14324
14325@@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14326 console_verbose();
14327 } else {
14328 /* Don't log too much for fake panic */
14329- if (atomic_inc_return(&mce_fake_paniced) > 1)
14330+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14331 return;
14332 }
14333 /* First print corrected ones that are still unlogged */
14334@@ -684,7 +685,7 @@ static int mce_timed_out(u64 *t)
14335 * might have been modified by someone else.
14336 */
14337 rmb();
14338- if (atomic_read(&mce_paniced))
14339+ if (atomic_read_unchecked(&mce_paniced))
14340 wait_for_panic();
14341 if (!monarch_timeout)
14342 goto out;
14343@@ -1535,7 +1536,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14344 }
14345
14346 /* Call the installed machine check handler for this CPU setup. */
14347-void (*machine_check_vector)(struct pt_regs *, long error_code) =
14348+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14349 unexpected_machine_check;
14350
14351 /*
14352@@ -1558,7 +1559,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14353 return;
14354 }
14355
14356+ pax_open_kernel();
14357 machine_check_vector = do_machine_check;
14358+ pax_close_kernel();
14359
14360 __mcheck_cpu_init_generic();
14361 __mcheck_cpu_init_vendor(c);
14362@@ -1572,7 +1575,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14363 */
14364
14365 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
14366-static int mce_chrdev_open_count; /* #times opened */
14367+static local_t mce_chrdev_open_count; /* #times opened */
14368 static int mce_chrdev_open_exclu; /* already open exclusive? */
14369
14370 static int mce_chrdev_open(struct inode *inode, struct file *file)
14371@@ -1580,7 +1583,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14372 spin_lock(&mce_chrdev_state_lock);
14373
14374 if (mce_chrdev_open_exclu ||
14375- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
14376+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
14377 spin_unlock(&mce_chrdev_state_lock);
14378
14379 return -EBUSY;
14380@@ -1588,7 +1591,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14381
14382 if (file->f_flags & O_EXCL)
14383 mce_chrdev_open_exclu = 1;
14384- mce_chrdev_open_count++;
14385+ local_inc(&mce_chrdev_open_count);
14386
14387 spin_unlock(&mce_chrdev_state_lock);
14388
14389@@ -1599,7 +1602,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
14390 {
14391 spin_lock(&mce_chrdev_state_lock);
14392
14393- mce_chrdev_open_count--;
14394+ local_dec(&mce_chrdev_open_count);
14395 mce_chrdev_open_exclu = 0;
14396
14397 spin_unlock(&mce_chrdev_state_lock);
14398@@ -2324,7 +2327,7 @@ struct dentry *mce_get_debugfs_dir(void)
14399 static void mce_reset(void)
14400 {
14401 cpu_missing = 0;
14402- atomic_set(&mce_fake_paniced, 0);
14403+ atomic_set_unchecked(&mce_fake_paniced, 0);
14404 atomic_set(&mce_executing, 0);
14405 atomic_set(&mce_callin, 0);
14406 atomic_set(&global_nwo, 0);
14407diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
14408index 2d5454c..51987eb 100644
14409--- a/arch/x86/kernel/cpu/mcheck/p5.c
14410+++ b/arch/x86/kernel/cpu/mcheck/p5.c
14411@@ -11,6 +11,7 @@
14412 #include <asm/processor.h>
14413 #include <asm/mce.h>
14414 #include <asm/msr.h>
14415+#include <asm/pgtable.h>
14416
14417 /* By default disabled */
14418 int mce_p5_enabled __read_mostly;
14419@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14420 if (!cpu_has(c, X86_FEATURE_MCE))
14421 return;
14422
14423+ pax_open_kernel();
14424 machine_check_vector = pentium_machine_check;
14425+ pax_close_kernel();
14426 /* Make sure the vector pointer is visible before we enable MCEs: */
14427 wmb();
14428
14429diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
14430index 2d7998f..17c9de1 100644
14431--- a/arch/x86/kernel/cpu/mcheck/winchip.c
14432+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14433@@ -10,6 +10,7 @@
14434 #include <asm/processor.h>
14435 #include <asm/mce.h>
14436 #include <asm/msr.h>
14437+#include <asm/pgtable.h>
14438
14439 /* Machine check handler for WinChip C6: */
14440 static void winchip_machine_check(struct pt_regs *regs, long error_code)
14441@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14442 {
14443 u32 lo, hi;
14444
14445+ pax_open_kernel();
14446 machine_check_vector = winchip_machine_check;
14447+ pax_close_kernel();
14448 /* Make sure the vector pointer is visible before we enable MCEs: */
14449 wmb();
14450
14451diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14452index 6b96110..0da73eb 100644
14453--- a/arch/x86/kernel/cpu/mtrr/main.c
14454+++ b/arch/x86/kernel/cpu/mtrr/main.c
14455@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
14456 u64 size_or_mask, size_and_mask;
14457 static bool mtrr_aps_delayed_init;
14458
14459-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14460+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14461
14462 const struct mtrr_ops *mtrr_if;
14463
14464diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14465index df5e41f..816c719 100644
14466--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14467+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14468@@ -25,7 +25,7 @@ struct mtrr_ops {
14469 int (*validate_add_page)(unsigned long base, unsigned long size,
14470 unsigned int type);
14471 int (*have_wrcomb)(void);
14472-};
14473+} __do_const;
14474
14475 extern int generic_get_free_region(unsigned long base, unsigned long size,
14476 int replace_reg);
14477diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14478index bb8e034..fb9020b 100644
14479--- a/arch/x86/kernel/cpu/perf_event.c
14480+++ b/arch/x86/kernel/cpu/perf_event.c
14481@@ -1835,7 +1835,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
14482 break;
14483
14484 perf_callchain_store(entry, frame.return_address);
14485- fp = frame.next_frame;
14486+ fp = (const void __force_user *)frame.next_frame;
14487 }
14488 }
14489
14490diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14491index 13ad899..f642b9a 100644
14492--- a/arch/x86/kernel/crash.c
14493+++ b/arch/x86/kernel/crash.c
14494@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
14495 {
14496 #ifdef CONFIG_X86_32
14497 struct pt_regs fixed_regs;
14498-#endif
14499
14500-#ifdef CONFIG_X86_32
14501- if (!user_mode_vm(regs)) {
14502+ if (!user_mode(regs)) {
14503 crash_fixup_ss_esp(&fixed_regs, regs);
14504 regs = &fixed_regs;
14505 }
14506diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14507index 37250fe..bf2ec74 100644
14508--- a/arch/x86/kernel/doublefault_32.c
14509+++ b/arch/x86/kernel/doublefault_32.c
14510@@ -11,7 +11,7 @@
14511
14512 #define DOUBLEFAULT_STACKSIZE (1024)
14513 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14514-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14515+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14516
14517 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14518
14519@@ -21,7 +21,7 @@ static void doublefault_fn(void)
14520 unsigned long gdt, tss;
14521
14522 store_gdt(&gdt_desc);
14523- gdt = gdt_desc.address;
14524+ gdt = (unsigned long)gdt_desc.address;
14525
14526 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14527
14528@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14529 /* 0x2 bit is always set */
14530 .flags = X86_EFLAGS_SF | 0x2,
14531 .sp = STACK_START,
14532- .es = __USER_DS,
14533+ .es = __KERNEL_DS,
14534 .cs = __KERNEL_CS,
14535 .ss = __KERNEL_DS,
14536- .ds = __USER_DS,
14537+ .ds = __KERNEL_DS,
14538 .fs = __KERNEL_PERCPU,
14539
14540 .__cr3 = __pa_nodebug(swapper_pg_dir),
14541diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14542index 1b81839..0b4e7b0 100644
14543--- a/arch/x86/kernel/dumpstack.c
14544+++ b/arch/x86/kernel/dumpstack.c
14545@@ -2,6 +2,9 @@
14546 * Copyright (C) 1991, 1992 Linus Torvalds
14547 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14548 */
14549+#ifdef CONFIG_GRKERNSEC_HIDESYM
14550+#define __INCLUDED_BY_HIDESYM 1
14551+#endif
14552 #include <linux/kallsyms.h>
14553 #include <linux/kprobes.h>
14554 #include <linux/uaccess.h>
14555@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
14556 static void
14557 print_ftrace_graph_addr(unsigned long addr, void *data,
14558 const struct stacktrace_ops *ops,
14559- struct thread_info *tinfo, int *graph)
14560+ struct task_struct *task, int *graph)
14561 {
14562- struct task_struct *task;
14563 unsigned long ret_addr;
14564 int index;
14565
14566 if (addr != (unsigned long)return_to_handler)
14567 return;
14568
14569- task = tinfo->task;
14570 index = task->curr_ret_stack;
14571
14572 if (!task->ret_stack || index < *graph)
14573@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14574 static inline void
14575 print_ftrace_graph_addr(unsigned long addr, void *data,
14576 const struct stacktrace_ops *ops,
14577- struct thread_info *tinfo, int *graph)
14578+ struct task_struct *task, int *graph)
14579 { }
14580 #endif
14581
14582@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14583 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14584 */
14585
14586-static inline int valid_stack_ptr(struct thread_info *tinfo,
14587- void *p, unsigned int size, void *end)
14588+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14589 {
14590- void *t = tinfo;
14591 if (end) {
14592 if (p < end && p >= (end-THREAD_SIZE))
14593 return 1;
14594@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14595 }
14596
14597 unsigned long
14598-print_context_stack(struct thread_info *tinfo,
14599+print_context_stack(struct task_struct *task, void *stack_start,
14600 unsigned long *stack, unsigned long bp,
14601 const struct stacktrace_ops *ops, void *data,
14602 unsigned long *end, int *graph)
14603 {
14604 struct stack_frame *frame = (struct stack_frame *)bp;
14605
14606- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14607+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14608 unsigned long addr;
14609
14610 addr = *stack;
14611@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
14612 } else {
14613 ops->address(data, addr, 0);
14614 }
14615- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14616+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14617 }
14618 stack++;
14619 }
14620@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
14621 EXPORT_SYMBOL_GPL(print_context_stack);
14622
14623 unsigned long
14624-print_context_stack_bp(struct thread_info *tinfo,
14625+print_context_stack_bp(struct task_struct *task, void *stack_start,
14626 unsigned long *stack, unsigned long bp,
14627 const struct stacktrace_ops *ops, void *data,
14628 unsigned long *end, int *graph)
14629@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14630 struct stack_frame *frame = (struct stack_frame *)bp;
14631 unsigned long *ret_addr = &frame->return_address;
14632
14633- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
14634+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
14635 unsigned long addr = *ret_addr;
14636
14637 if (!__kernel_text_address(addr))
14638@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14639 ops->address(data, addr, 1);
14640 frame = frame->next_frame;
14641 ret_addr = &frame->return_address;
14642- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14643+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14644 }
14645
14646 return (unsigned long)frame;
14647@@ -189,7 +188,7 @@ void dump_stack(void)
14648
14649 bp = stack_frame(current, NULL);
14650 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14651- current->pid, current->comm, print_tainted(),
14652+ task_pid_nr(current), current->comm, print_tainted(),
14653 init_utsname()->release,
14654 (int)strcspn(init_utsname()->version, " "),
14655 init_utsname()->version);
14656@@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
14657 }
14658 EXPORT_SYMBOL_GPL(oops_begin);
14659
14660+extern void gr_handle_kernel_exploit(void);
14661+
14662 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14663 {
14664 if (regs && kexec_should_crash(current))
14665@@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14666 panic("Fatal exception in interrupt");
14667 if (panic_on_oops)
14668 panic("Fatal exception");
14669- do_exit(signr);
14670+
14671+ gr_handle_kernel_exploit();
14672+
14673+ do_group_exit(signr);
14674 }
14675
14676 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14677@@ -273,7 +277,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14678
14679 show_registers(regs);
14680 #ifdef CONFIG_X86_32
14681- if (user_mode_vm(regs)) {
14682+ if (user_mode(regs)) {
14683 sp = regs->sp;
14684 ss = regs->ss & 0xffff;
14685 } else {
14686@@ -301,7 +305,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14687 unsigned long flags = oops_begin();
14688 int sig = SIGSEGV;
14689
14690- if (!user_mode_vm(regs))
14691+ if (!user_mode(regs))
14692 report_bug(regs->ip, regs);
14693
14694 if (__die(str, regs, err))
14695diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14696index 88ec912..e95e935 100644
14697--- a/arch/x86/kernel/dumpstack_32.c
14698+++ b/arch/x86/kernel/dumpstack_32.c
14699@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14700 bp = stack_frame(task, regs);
14701
14702 for (;;) {
14703- struct thread_info *context;
14704+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14705
14706- context = (struct thread_info *)
14707- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14708- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14709+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14710
14711- stack = (unsigned long *)context->previous_esp;
14712- if (!stack)
14713+ if (stack_start == task_stack_page(task))
14714 break;
14715+ stack = *(unsigned long **)stack_start;
14716 if (ops->stack(data, "IRQ") < 0)
14717 break;
14718 touch_nmi_watchdog();
14719@@ -87,7 +85,7 @@ void show_registers(struct pt_regs *regs)
14720 int i;
14721
14722 print_modules();
14723- __show_regs(regs, !user_mode_vm(regs));
14724+ __show_regs(regs, !user_mode(regs));
14725
14726 printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
14727 TASK_COMM_LEN, current->comm, task_pid_nr(current),
14728@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
14729 * When in-kernel, we also print out the stack and code at the
14730 * time of the fault..
14731 */
14732- if (!user_mode_vm(regs)) {
14733+ if (!user_mode(regs)) {
14734 unsigned int code_prologue = code_bytes * 43 / 64;
14735 unsigned int code_len = code_bytes;
14736 unsigned char c;
14737 u8 *ip;
14738+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14739
14740 printk(KERN_EMERG "Stack:\n");
14741 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14742
14743 printk(KERN_EMERG "Code: ");
14744
14745- ip = (u8 *)regs->ip - code_prologue;
14746+ ip = (u8 *)regs->ip - code_prologue + cs_base;
14747 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14748 /* try starting at IP */
14749- ip = (u8 *)regs->ip;
14750+ ip = (u8 *)regs->ip + cs_base;
14751 code_len = code_len - code_prologue + 1;
14752 }
14753 for (i = 0; i < code_len; i++, ip++) {
14754@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
14755 printk(KERN_CONT " Bad EIP value.");
14756 break;
14757 }
14758- if (ip == (u8 *)regs->ip)
14759+ if (ip == (u8 *)regs->ip + cs_base)
14760 printk(KERN_CONT "<%02x> ", c);
14761 else
14762 printk(KERN_CONT "%02x ", c);
14763@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14764 {
14765 unsigned short ud2;
14766
14767+ ip = ktla_ktva(ip);
14768 if (ip < PAGE_OFFSET)
14769 return 0;
14770 if (probe_kernel_address((unsigned short *)ip, ud2))
14771@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14772
14773 return ud2 == 0x0b0f;
14774 }
14775+
14776+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14777+void pax_check_alloca(unsigned long size)
14778+{
14779+ unsigned long sp = (unsigned long)&sp, stack_left;
14780+
14781+ /* all kernel stacks are of the same size */
14782+ stack_left = sp & (THREAD_SIZE - 1);
14783+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14784+}
14785+EXPORT_SYMBOL(pax_check_alloca);
14786+#endif
14787diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14788index 17107bd..9623722 100644
14789--- a/arch/x86/kernel/dumpstack_64.c
14790+++ b/arch/x86/kernel/dumpstack_64.c
14791@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14792 unsigned long *irq_stack_end =
14793 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14794 unsigned used = 0;
14795- struct thread_info *tinfo;
14796 int graph = 0;
14797 unsigned long dummy;
14798+ void *stack_start;
14799
14800 if (!task)
14801 task = current;
14802@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14803 * current stack address. If the stacks consist of nested
14804 * exceptions
14805 */
14806- tinfo = task_thread_info(task);
14807 for (;;) {
14808 char *id;
14809 unsigned long *estack_end;
14810+
14811 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14812 &used, &id);
14813
14814@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14815 if (ops->stack(data, id) < 0)
14816 break;
14817
14818- bp = ops->walk_stack(tinfo, stack, bp, ops,
14819+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14820 data, estack_end, &graph);
14821 ops->stack(data, "<EOE>");
14822 /*
14823@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14824 * second-to-last pointer (index -2 to end) in the
14825 * exception stack:
14826 */
14827+ if ((u16)estack_end[-1] != __KERNEL_DS)
14828+ goto out;
14829 stack = (unsigned long *) estack_end[-2];
14830 continue;
14831 }
14832@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14833 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14834 if (ops->stack(data, "IRQ") < 0)
14835 break;
14836- bp = ops->walk_stack(tinfo, stack, bp,
14837+ bp = ops->walk_stack(task, irq_stack, stack, bp,
14838 ops, data, irq_stack_end, &graph);
14839 /*
14840 * We link to the next stack (which would be
14841@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14842 /*
14843 * This handles the process stack:
14844 */
14845- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14846+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14847+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14848+out:
14849 put_cpu();
14850 }
14851 EXPORT_SYMBOL(dump_trace);
14852@@ -305,3 +309,50 @@ int is_valid_bugaddr(unsigned long ip)
14853
14854 return ud2 == 0x0b0f;
14855 }
14856+
14857+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14858+void pax_check_alloca(unsigned long size)
14859+{
14860+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14861+ unsigned cpu, used;
14862+ char *id;
14863+
14864+ /* check the process stack first */
14865+ stack_start = (unsigned long)task_stack_page(current);
14866+ stack_end = stack_start + THREAD_SIZE;
14867+ if (likely(stack_start <= sp && sp < stack_end)) {
14868+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
14869+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14870+ return;
14871+ }
14872+
14873+ cpu = get_cpu();
14874+
14875+ /* check the irq stacks */
14876+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14877+ stack_start = stack_end - IRQ_STACK_SIZE;
14878+ if (stack_start <= sp && sp < stack_end) {
14879+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14880+ put_cpu();
14881+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14882+ return;
14883+ }
14884+
14885+ /* check the exception stacks */
14886+ used = 0;
14887+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14888+ stack_start = stack_end - EXCEPTION_STKSZ;
14889+ if (stack_end && stack_start <= sp && sp < stack_end) {
14890+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14891+ put_cpu();
14892+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14893+ return;
14894+ }
14895+
14896+ put_cpu();
14897+
14898+ /* unknown stack */
14899+ BUG();
14900+}
14901+EXPORT_SYMBOL(pax_check_alloca);
14902+#endif
14903diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14904index 9b9f18b..9fcaa04 100644
14905--- a/arch/x86/kernel/early_printk.c
14906+++ b/arch/x86/kernel/early_printk.c
14907@@ -7,6 +7,7 @@
14908 #include <linux/pci_regs.h>
14909 #include <linux/pci_ids.h>
14910 #include <linux/errno.h>
14911+#include <linux/sched.h>
14912 #include <asm/io.h>
14913 #include <asm/processor.h>
14914 #include <asm/fcntl.h>
14915diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14916index 7b784f4..db6b628 100644
14917--- a/arch/x86/kernel/entry_32.S
14918+++ b/arch/x86/kernel/entry_32.S
14919@@ -179,13 +179,146 @@
14920 /*CFI_REL_OFFSET gs, PT_GS*/
14921 .endm
14922 .macro SET_KERNEL_GS reg
14923+
14924+#ifdef CONFIG_CC_STACKPROTECTOR
14925 movl $(__KERNEL_STACK_CANARY), \reg
14926+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14927+ movl $(__USER_DS), \reg
14928+#else
14929+ xorl \reg, \reg
14930+#endif
14931+
14932 movl \reg, %gs
14933 .endm
14934
14935 #endif /* CONFIG_X86_32_LAZY_GS */
14936
14937-.macro SAVE_ALL
14938+.macro pax_enter_kernel
14939+#ifdef CONFIG_PAX_KERNEXEC
14940+ call pax_enter_kernel
14941+#endif
14942+.endm
14943+
14944+.macro pax_exit_kernel
14945+#ifdef CONFIG_PAX_KERNEXEC
14946+ call pax_exit_kernel
14947+#endif
14948+.endm
14949+
14950+#ifdef CONFIG_PAX_KERNEXEC
14951+ENTRY(pax_enter_kernel)
14952+#ifdef CONFIG_PARAVIRT
14953+ pushl %eax
14954+ pushl %ecx
14955+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14956+ mov %eax, %esi
14957+#else
14958+ mov %cr0, %esi
14959+#endif
14960+ bts $16, %esi
14961+ jnc 1f
14962+ mov %cs, %esi
14963+ cmp $__KERNEL_CS, %esi
14964+ jz 3f
14965+ ljmp $__KERNEL_CS, $3f
14966+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14967+2:
14968+#ifdef CONFIG_PARAVIRT
14969+ mov %esi, %eax
14970+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14971+#else
14972+ mov %esi, %cr0
14973+#endif
14974+3:
14975+#ifdef CONFIG_PARAVIRT
14976+ popl %ecx
14977+ popl %eax
14978+#endif
14979+ ret
14980+ENDPROC(pax_enter_kernel)
14981+
14982+ENTRY(pax_exit_kernel)
14983+#ifdef CONFIG_PARAVIRT
14984+ pushl %eax
14985+ pushl %ecx
14986+#endif
14987+ mov %cs, %esi
14988+ cmp $__KERNEXEC_KERNEL_CS, %esi
14989+ jnz 2f
14990+#ifdef CONFIG_PARAVIRT
14991+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
14992+ mov %eax, %esi
14993+#else
14994+ mov %cr0, %esi
14995+#endif
14996+ btr $16, %esi
14997+ ljmp $__KERNEL_CS, $1f
14998+1:
14999+#ifdef CONFIG_PARAVIRT
15000+ mov %esi, %eax
15001+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
15002+#else
15003+ mov %esi, %cr0
15004+#endif
15005+2:
15006+#ifdef CONFIG_PARAVIRT
15007+ popl %ecx
15008+ popl %eax
15009+#endif
15010+ ret
15011+ENDPROC(pax_exit_kernel)
15012+#endif
15013+
15014+.macro pax_erase_kstack
15015+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15016+ call pax_erase_kstack
15017+#endif
15018+.endm
15019+
15020+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15021+/*
15022+ * ebp: thread_info
15023+ * ecx, edx: can be clobbered
15024+ */
15025+ENTRY(pax_erase_kstack)
15026+ pushl %edi
15027+ pushl %eax
15028+
15029+ mov TI_lowest_stack(%ebp), %edi
15030+ mov $-0xBEEF, %eax
15031+ std
15032+
15033+1: mov %edi, %ecx
15034+ and $THREAD_SIZE_asm - 1, %ecx
15035+ shr $2, %ecx
15036+ repne scasl
15037+ jecxz 2f
15038+
15039+ cmp $2*16, %ecx
15040+ jc 2f
15041+
15042+ mov $2*16, %ecx
15043+ repe scasl
15044+ jecxz 2f
15045+ jne 1b
15046+
15047+2: cld
15048+ mov %esp, %ecx
15049+ sub %edi, %ecx
15050+ shr $2, %ecx
15051+ rep stosl
15052+
15053+ mov TI_task_thread_sp0(%ebp), %edi
15054+ sub $128, %edi
15055+ mov %edi, TI_lowest_stack(%ebp)
15056+
15057+ popl %eax
15058+ popl %edi
15059+ ret
15060+ENDPROC(pax_erase_kstack)
15061+#endif
15062+
15063+.macro __SAVE_ALL _DS
15064 cld
15065 PUSH_GS
15066 pushl_cfi %fs
15067@@ -208,7 +341,7 @@
15068 CFI_REL_OFFSET ecx, 0
15069 pushl_cfi %ebx
15070 CFI_REL_OFFSET ebx, 0
15071- movl $(__USER_DS), %edx
15072+ movl $\_DS, %edx
15073 movl %edx, %ds
15074 movl %edx, %es
15075 movl $(__KERNEL_PERCPU), %edx
15076@@ -216,6 +349,15 @@
15077 SET_KERNEL_GS %edx
15078 .endm
15079
15080+.macro SAVE_ALL
15081+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15082+ __SAVE_ALL __KERNEL_DS
15083+ pax_enter_kernel
15084+#else
15085+ __SAVE_ALL __USER_DS
15086+#endif
15087+.endm
15088+
15089 .macro RESTORE_INT_REGS
15090 popl_cfi %ebx
15091 CFI_RESTORE ebx
15092@@ -301,7 +443,7 @@ ENTRY(ret_from_fork)
15093 popfl_cfi
15094 jmp syscall_exit
15095 CFI_ENDPROC
15096-END(ret_from_fork)
15097+ENDPROC(ret_from_fork)
15098
15099 /*
15100 * Interrupt exit functions should be protected against kprobes
15101@@ -335,7 +477,15 @@ resume_userspace_sig:
15102 andl $SEGMENT_RPL_MASK, %eax
15103 #endif
15104 cmpl $USER_RPL, %eax
15105+
15106+#ifdef CONFIG_PAX_KERNEXEC
15107+ jae resume_userspace
15108+
15109+ pax_exit_kernel
15110+ jmp resume_kernel
15111+#else
15112 jb resume_kernel # not returning to v8086 or userspace
15113+#endif
15114
15115 ENTRY(resume_userspace)
15116 LOCKDEP_SYS_EXIT
15117@@ -347,8 +497,8 @@ ENTRY(resume_userspace)
15118 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15119 # int/exception return?
15120 jne work_pending
15121- jmp restore_all
15122-END(ret_from_exception)
15123+ jmp restore_all_pax
15124+ENDPROC(ret_from_exception)
15125
15126 #ifdef CONFIG_PREEMPT
15127 ENTRY(resume_kernel)
15128@@ -363,7 +513,7 @@ need_resched:
15129 jz restore_all
15130 call preempt_schedule_irq
15131 jmp need_resched
15132-END(resume_kernel)
15133+ENDPROC(resume_kernel)
15134 #endif
15135 CFI_ENDPROC
15136 /*
15137@@ -397,23 +547,34 @@ sysenter_past_esp:
15138 /*CFI_REL_OFFSET cs, 0*/
15139 /*
15140 * Push current_thread_info()->sysenter_return to the stack.
15141- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15142- * pushed above; +8 corresponds to copy_thread's esp0 setting.
15143 */
15144- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
15145+ pushl_cfi $0
15146 CFI_REL_OFFSET eip, 0
15147
15148 pushl_cfi %eax
15149 SAVE_ALL
15150+ GET_THREAD_INFO(%ebp)
15151+ movl TI_sysenter_return(%ebp),%ebp
15152+ movl %ebp,PT_EIP(%esp)
15153 ENABLE_INTERRUPTS(CLBR_NONE)
15154
15155 /*
15156 * Load the potential sixth argument from user stack.
15157 * Careful about security.
15158 */
15159+ movl PT_OLDESP(%esp),%ebp
15160+
15161+#ifdef CONFIG_PAX_MEMORY_UDEREF
15162+ mov PT_OLDSS(%esp),%ds
15163+1: movl %ds:(%ebp),%ebp
15164+ push %ss
15165+ pop %ds
15166+#else
15167 cmpl $__PAGE_OFFSET-3,%ebp
15168 jae syscall_fault
15169 1: movl (%ebp),%ebp
15170+#endif
15171+
15172 movl %ebp,PT_EBP(%esp)
15173 .section __ex_table,"a"
15174 .align 4
15175@@ -436,12 +597,24 @@ sysenter_do_call:
15176 testl $_TIF_ALLWORK_MASK, %ecx
15177 jne sysexit_audit
15178 sysenter_exit:
15179+
15180+#ifdef CONFIG_PAX_RANDKSTACK
15181+ pushl_cfi %eax
15182+ movl %esp, %eax
15183+ call pax_randomize_kstack
15184+ popl_cfi %eax
15185+#endif
15186+
15187+ pax_erase_kstack
15188+
15189 /* if something modifies registers it must also disable sysexit */
15190 movl PT_EIP(%esp), %edx
15191 movl PT_OLDESP(%esp), %ecx
15192 xorl %ebp,%ebp
15193 TRACE_IRQS_ON
15194 1: mov PT_FS(%esp), %fs
15195+2: mov PT_DS(%esp), %ds
15196+3: mov PT_ES(%esp), %es
15197 PTGS_TO_GS
15198 ENABLE_INTERRUPTS_SYSEXIT
15199
15200@@ -458,6 +631,9 @@ sysenter_audit:
15201 movl %eax,%edx /* 2nd arg: syscall number */
15202 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15203 call __audit_syscall_entry
15204+
15205+ pax_erase_kstack
15206+
15207 pushl_cfi %ebx
15208 movl PT_EAX(%esp),%eax /* reload syscall number */
15209 jmp sysenter_do_call
15210@@ -483,11 +659,17 @@ sysexit_audit:
15211
15212 CFI_ENDPROC
15213 .pushsection .fixup,"ax"
15214-2: movl $0,PT_FS(%esp)
15215+4: movl $0,PT_FS(%esp)
15216+ jmp 1b
15217+5: movl $0,PT_DS(%esp)
15218+ jmp 1b
15219+6: movl $0,PT_ES(%esp)
15220 jmp 1b
15221 .section __ex_table,"a"
15222 .align 4
15223- .long 1b,2b
15224+ .long 1b,4b
15225+ .long 2b,5b
15226+ .long 3b,6b
15227 .popsection
15228 PTGS_TO_GS_EX
15229 ENDPROC(ia32_sysenter_target)
15230@@ -520,6 +702,15 @@ syscall_exit:
15231 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15232 jne syscall_exit_work
15233
15234+restore_all_pax:
15235+
15236+#ifdef CONFIG_PAX_RANDKSTACK
15237+ movl %esp, %eax
15238+ call pax_randomize_kstack
15239+#endif
15240+
15241+ pax_erase_kstack
15242+
15243 restore_all:
15244 TRACE_IRQS_IRET
15245 restore_all_notrace:
15246@@ -579,14 +770,34 @@ ldt_ss:
15247 * compensating for the offset by changing to the ESPFIX segment with
15248 * a base address that matches for the difference.
15249 */
15250-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
15251+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
15252 mov %esp, %edx /* load kernel esp */
15253 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15254 mov %dx, %ax /* eax: new kernel esp */
15255 sub %eax, %edx /* offset (low word is 0) */
15256+#ifdef CONFIG_SMP
15257+ movl PER_CPU_VAR(cpu_number), %ebx
15258+ shll $PAGE_SHIFT_asm, %ebx
15259+ addl $cpu_gdt_table, %ebx
15260+#else
15261+ movl $cpu_gdt_table, %ebx
15262+#endif
15263 shr $16, %edx
15264- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
15265- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
15266+
15267+#ifdef CONFIG_PAX_KERNEXEC
15268+ mov %cr0, %esi
15269+ btr $16, %esi
15270+ mov %esi, %cr0
15271+#endif
15272+
15273+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
15274+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
15275+
15276+#ifdef CONFIG_PAX_KERNEXEC
15277+ bts $16, %esi
15278+ mov %esi, %cr0
15279+#endif
15280+
15281 pushl_cfi $__ESPFIX_SS
15282 pushl_cfi %eax /* new kernel esp */
15283 /* Disable interrupts, but do not irqtrace this section: we
15284@@ -615,38 +826,30 @@ work_resched:
15285 movl TI_flags(%ebp), %ecx
15286 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15287 # than syscall tracing?
15288- jz restore_all
15289+ jz restore_all_pax
15290 testb $_TIF_NEED_RESCHED, %cl
15291 jnz work_resched
15292
15293 work_notifysig: # deal with pending signals and
15294 # notify-resume requests
15295+ movl %esp, %eax
15296 #ifdef CONFIG_VM86
15297 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15298- movl %esp, %eax
15299- jne work_notifysig_v86 # returning to kernel-space or
15300+ jz 1f # returning to kernel-space or
15301 # vm86-space
15302- TRACE_IRQS_ON
15303- ENABLE_INTERRUPTS(CLBR_NONE)
15304- xorl %edx, %edx
15305- call do_notify_resume
15306- jmp resume_userspace_sig
15307
15308- ALIGN
15309-work_notifysig_v86:
15310 pushl_cfi %ecx # save ti_flags for do_notify_resume
15311 call save_v86_state # %eax contains pt_regs pointer
15312 popl_cfi %ecx
15313 movl %eax, %esp
15314-#else
15315- movl %esp, %eax
15316+1:
15317 #endif
15318 TRACE_IRQS_ON
15319 ENABLE_INTERRUPTS(CLBR_NONE)
15320 xorl %edx, %edx
15321 call do_notify_resume
15322 jmp resume_userspace_sig
15323-END(work_pending)
15324+ENDPROC(work_pending)
15325
15326 # perform syscall exit tracing
15327 ALIGN
15328@@ -654,11 +857,14 @@ syscall_trace_entry:
15329 movl $-ENOSYS,PT_EAX(%esp)
15330 movl %esp, %eax
15331 call syscall_trace_enter
15332+
15333+ pax_erase_kstack
15334+
15335 /* What it returned is what we'll actually use. */
15336 cmpl $(NR_syscalls), %eax
15337 jnae syscall_call
15338 jmp syscall_exit
15339-END(syscall_trace_entry)
15340+ENDPROC(syscall_trace_entry)
15341
15342 # perform syscall exit tracing
15343 ALIGN
15344@@ -671,20 +877,24 @@ syscall_exit_work:
15345 movl %esp, %eax
15346 call syscall_trace_leave
15347 jmp resume_userspace
15348-END(syscall_exit_work)
15349+ENDPROC(syscall_exit_work)
15350 CFI_ENDPROC
15351
15352 RING0_INT_FRAME # can't unwind into user space anyway
15353 syscall_fault:
15354+#ifdef CONFIG_PAX_MEMORY_UDEREF
15355+ push %ss
15356+ pop %ds
15357+#endif
15358 GET_THREAD_INFO(%ebp)
15359 movl $-EFAULT,PT_EAX(%esp)
15360 jmp resume_userspace
15361-END(syscall_fault)
15362+ENDPROC(syscall_fault)
15363
15364 syscall_badsys:
15365 movl $-ENOSYS,PT_EAX(%esp)
15366 jmp resume_userspace
15367-END(syscall_badsys)
15368+ENDPROC(syscall_badsys)
15369 CFI_ENDPROC
15370 /*
15371 * End of kprobes section
15372@@ -756,6 +966,36 @@ ENTRY(ptregs_clone)
15373 CFI_ENDPROC
15374 ENDPROC(ptregs_clone)
15375
15376+ ALIGN;
15377+ENTRY(kernel_execve)
15378+ CFI_STARTPROC
15379+ pushl_cfi %ebp
15380+ sub $PT_OLDSS+4,%esp
15381+ pushl_cfi %edi
15382+ pushl_cfi %ecx
15383+ pushl_cfi %eax
15384+ lea 3*4(%esp),%edi
15385+ mov $PT_OLDSS/4+1,%ecx
15386+ xorl %eax,%eax
15387+ rep stosl
15388+ popl_cfi %eax
15389+ popl_cfi %ecx
15390+ popl_cfi %edi
15391+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15392+ pushl_cfi %esp
15393+ call sys_execve
15394+ add $4,%esp
15395+ CFI_ADJUST_CFA_OFFSET -4
15396+ GET_THREAD_INFO(%ebp)
15397+ test %eax,%eax
15398+ jz syscall_exit
15399+ add $PT_OLDSS+4,%esp
15400+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
15401+ popl_cfi %ebp
15402+ ret
15403+ CFI_ENDPROC
15404+ENDPROC(kernel_execve)
15405+
15406 .macro FIXUP_ESPFIX_STACK
15407 /*
15408 * Switch back for ESPFIX stack to the normal zerobased stack
15409@@ -765,8 +1005,15 @@ ENDPROC(ptregs_clone)
15410 * normal stack and adjusts ESP with the matching offset.
15411 */
15412 /* fixup the stack */
15413- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
15414- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
15415+#ifdef CONFIG_SMP
15416+ movl PER_CPU_VAR(cpu_number), %ebx
15417+ shll $PAGE_SHIFT_asm, %ebx
15418+ addl $cpu_gdt_table, %ebx
15419+#else
15420+ movl $cpu_gdt_table, %ebx
15421+#endif
15422+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
15423+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
15424 shl $16, %eax
15425 addl %esp, %eax /* the adjusted stack pointer */
15426 pushl_cfi $__KERNEL_DS
15427@@ -819,7 +1066,7 @@ vector=vector+1
15428 .endr
15429 2: jmp common_interrupt
15430 .endr
15431-END(irq_entries_start)
15432+ENDPROC(irq_entries_start)
15433
15434 .previous
15435 END(interrupt)
15436@@ -867,7 +1114,7 @@ ENTRY(coprocessor_error)
15437 pushl_cfi $do_coprocessor_error
15438 jmp error_code
15439 CFI_ENDPROC
15440-END(coprocessor_error)
15441+ENDPROC(coprocessor_error)
15442
15443 ENTRY(simd_coprocessor_error)
15444 RING0_INT_FRAME
15445@@ -888,7 +1135,7 @@ ENTRY(simd_coprocessor_error)
15446 #endif
15447 jmp error_code
15448 CFI_ENDPROC
15449-END(simd_coprocessor_error)
15450+ENDPROC(simd_coprocessor_error)
15451
15452 ENTRY(device_not_available)
15453 RING0_INT_FRAME
15454@@ -896,7 +1143,7 @@ ENTRY(device_not_available)
15455 pushl_cfi $do_device_not_available
15456 jmp error_code
15457 CFI_ENDPROC
15458-END(device_not_available)
15459+ENDPROC(device_not_available)
15460
15461 #ifdef CONFIG_PARAVIRT
15462 ENTRY(native_iret)
15463@@ -905,12 +1152,12 @@ ENTRY(native_iret)
15464 .align 4
15465 .long native_iret, iret_exc
15466 .previous
15467-END(native_iret)
15468+ENDPROC(native_iret)
15469
15470 ENTRY(native_irq_enable_sysexit)
15471 sti
15472 sysexit
15473-END(native_irq_enable_sysexit)
15474+ENDPROC(native_irq_enable_sysexit)
15475 #endif
15476
15477 ENTRY(overflow)
15478@@ -919,7 +1166,7 @@ ENTRY(overflow)
15479 pushl_cfi $do_overflow
15480 jmp error_code
15481 CFI_ENDPROC
15482-END(overflow)
15483+ENDPROC(overflow)
15484
15485 ENTRY(bounds)
15486 RING0_INT_FRAME
15487@@ -927,7 +1174,7 @@ ENTRY(bounds)
15488 pushl_cfi $do_bounds
15489 jmp error_code
15490 CFI_ENDPROC
15491-END(bounds)
15492+ENDPROC(bounds)
15493
15494 ENTRY(invalid_op)
15495 RING0_INT_FRAME
15496@@ -935,7 +1182,7 @@ ENTRY(invalid_op)
15497 pushl_cfi $do_invalid_op
15498 jmp error_code
15499 CFI_ENDPROC
15500-END(invalid_op)
15501+ENDPROC(invalid_op)
15502
15503 ENTRY(coprocessor_segment_overrun)
15504 RING0_INT_FRAME
15505@@ -943,35 +1190,35 @@ ENTRY(coprocessor_segment_overrun)
15506 pushl_cfi $do_coprocessor_segment_overrun
15507 jmp error_code
15508 CFI_ENDPROC
15509-END(coprocessor_segment_overrun)
15510+ENDPROC(coprocessor_segment_overrun)
15511
15512 ENTRY(invalid_TSS)
15513 RING0_EC_FRAME
15514 pushl_cfi $do_invalid_TSS
15515 jmp error_code
15516 CFI_ENDPROC
15517-END(invalid_TSS)
15518+ENDPROC(invalid_TSS)
15519
15520 ENTRY(segment_not_present)
15521 RING0_EC_FRAME
15522 pushl_cfi $do_segment_not_present
15523 jmp error_code
15524 CFI_ENDPROC
15525-END(segment_not_present)
15526+ENDPROC(segment_not_present)
15527
15528 ENTRY(stack_segment)
15529 RING0_EC_FRAME
15530 pushl_cfi $do_stack_segment
15531 jmp error_code
15532 CFI_ENDPROC
15533-END(stack_segment)
15534+ENDPROC(stack_segment)
15535
15536 ENTRY(alignment_check)
15537 RING0_EC_FRAME
15538 pushl_cfi $do_alignment_check
15539 jmp error_code
15540 CFI_ENDPROC
15541-END(alignment_check)
15542+ENDPROC(alignment_check)
15543
15544 ENTRY(divide_error)
15545 RING0_INT_FRAME
15546@@ -979,7 +1226,7 @@ ENTRY(divide_error)
15547 pushl_cfi $do_divide_error
15548 jmp error_code
15549 CFI_ENDPROC
15550-END(divide_error)
15551+ENDPROC(divide_error)
15552
15553 #ifdef CONFIG_X86_MCE
15554 ENTRY(machine_check)
15555@@ -988,7 +1235,7 @@ ENTRY(machine_check)
15556 pushl_cfi machine_check_vector
15557 jmp error_code
15558 CFI_ENDPROC
15559-END(machine_check)
15560+ENDPROC(machine_check)
15561 #endif
15562
15563 ENTRY(spurious_interrupt_bug)
15564@@ -997,7 +1244,7 @@ ENTRY(spurious_interrupt_bug)
15565 pushl_cfi $do_spurious_interrupt_bug
15566 jmp error_code
15567 CFI_ENDPROC
15568-END(spurious_interrupt_bug)
15569+ENDPROC(spurious_interrupt_bug)
15570 /*
15571 * End of kprobes section
15572 */
15573@@ -1112,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
15574
15575 ENTRY(mcount)
15576 ret
15577-END(mcount)
15578+ENDPROC(mcount)
15579
15580 ENTRY(ftrace_caller)
15581 cmpl $0, function_trace_stop
15582@@ -1141,7 +1388,7 @@ ftrace_graph_call:
15583 .globl ftrace_stub
15584 ftrace_stub:
15585 ret
15586-END(ftrace_caller)
15587+ENDPROC(ftrace_caller)
15588
15589 #else /* ! CONFIG_DYNAMIC_FTRACE */
15590
15591@@ -1177,7 +1424,7 @@ trace:
15592 popl %ecx
15593 popl %eax
15594 jmp ftrace_stub
15595-END(mcount)
15596+ENDPROC(mcount)
15597 #endif /* CONFIG_DYNAMIC_FTRACE */
15598 #endif /* CONFIG_FUNCTION_TRACER */
15599
15600@@ -1198,7 +1445,7 @@ ENTRY(ftrace_graph_caller)
15601 popl %ecx
15602 popl %eax
15603 ret
15604-END(ftrace_graph_caller)
15605+ENDPROC(ftrace_graph_caller)
15606
15607 .globl return_to_handler
15608 return_to_handler:
15609@@ -1253,15 +1500,18 @@ error_code:
15610 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15611 REG_TO_PTGS %ecx
15612 SET_KERNEL_GS %ecx
15613- movl $(__USER_DS), %ecx
15614+ movl $(__KERNEL_DS), %ecx
15615 movl %ecx, %ds
15616 movl %ecx, %es
15617+
15618+ pax_enter_kernel
15619+
15620 TRACE_IRQS_OFF
15621 movl %esp,%eax # pt_regs pointer
15622 call *%edi
15623 jmp ret_from_exception
15624 CFI_ENDPROC
15625-END(page_fault)
15626+ENDPROC(page_fault)
15627
15628 /*
15629 * Debug traps and NMI can happen at the one SYSENTER instruction
15630@@ -1303,7 +1553,7 @@ debug_stack_correct:
15631 call do_debug
15632 jmp ret_from_exception
15633 CFI_ENDPROC
15634-END(debug)
15635+ENDPROC(debug)
15636
15637 /*
15638 * NMI is doubly nasty. It can happen _while_ we're handling
15639@@ -1340,6 +1590,9 @@ nmi_stack_correct:
15640 xorl %edx,%edx # zero error code
15641 movl %esp,%eax # pt_regs pointer
15642 call do_nmi
15643+
15644+ pax_exit_kernel
15645+
15646 jmp restore_all_notrace
15647 CFI_ENDPROC
15648
15649@@ -1376,12 +1629,15 @@ nmi_espfix_stack:
15650 FIXUP_ESPFIX_STACK # %eax == %esp
15651 xorl %edx,%edx # zero error code
15652 call do_nmi
15653+
15654+ pax_exit_kernel
15655+
15656 RESTORE_REGS
15657 lss 12+4(%esp), %esp # back to espfix stack
15658 CFI_ADJUST_CFA_OFFSET -24
15659 jmp irq_return
15660 CFI_ENDPROC
15661-END(nmi)
15662+ENDPROC(nmi)
15663
15664 ENTRY(int3)
15665 RING0_INT_FRAME
15666@@ -1393,14 +1649,14 @@ ENTRY(int3)
15667 call do_int3
15668 jmp ret_from_exception
15669 CFI_ENDPROC
15670-END(int3)
15671+ENDPROC(int3)
15672
15673 ENTRY(general_protection)
15674 RING0_EC_FRAME
15675 pushl_cfi $do_general_protection
15676 jmp error_code
15677 CFI_ENDPROC
15678-END(general_protection)
15679+ENDPROC(general_protection)
15680
15681 #ifdef CONFIG_KVM_GUEST
15682 ENTRY(async_page_fault)
15683@@ -1408,7 +1664,7 @@ ENTRY(async_page_fault)
15684 pushl_cfi $do_async_page_fault
15685 jmp error_code
15686 CFI_ENDPROC
15687-END(async_page_fault)
15688+ENDPROC(async_page_fault)
15689 #endif
15690
15691 /*
15692diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15693index cdc79b5..4710a75 100644
15694--- a/arch/x86/kernel/entry_64.S
15695+++ b/arch/x86/kernel/entry_64.S
15696@@ -56,6 +56,8 @@
15697 #include <asm/ftrace.h>
15698 #include <asm/percpu.h>
15699 #include <linux/err.h>
15700+#include <asm/pgtable.h>
15701+#include <asm/alternative-asm.h>
15702
15703 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15704 #include <linux/elf-em.h>
15705@@ -69,8 +71,9 @@
15706 #ifdef CONFIG_FUNCTION_TRACER
15707 #ifdef CONFIG_DYNAMIC_FTRACE
15708 ENTRY(mcount)
15709+ pax_force_retaddr
15710 retq
15711-END(mcount)
15712+ENDPROC(mcount)
15713
15714 ENTRY(ftrace_caller)
15715 cmpl $0, function_trace_stop
15716@@ -93,8 +96,9 @@ GLOBAL(ftrace_graph_call)
15717 #endif
15718
15719 GLOBAL(ftrace_stub)
15720+ pax_force_retaddr
15721 retq
15722-END(ftrace_caller)
15723+ENDPROC(ftrace_caller)
15724
15725 #else /* ! CONFIG_DYNAMIC_FTRACE */
15726 ENTRY(mcount)
15727@@ -113,6 +117,7 @@ ENTRY(mcount)
15728 #endif
15729
15730 GLOBAL(ftrace_stub)
15731+ pax_force_retaddr
15732 retq
15733
15734 trace:
15735@@ -122,12 +127,13 @@ trace:
15736 movq 8(%rbp), %rsi
15737 subq $MCOUNT_INSN_SIZE, %rdi
15738
15739+ pax_force_fptr ftrace_trace_function
15740 call *ftrace_trace_function
15741
15742 MCOUNT_RESTORE_FRAME
15743
15744 jmp ftrace_stub
15745-END(mcount)
15746+ENDPROC(mcount)
15747 #endif /* CONFIG_DYNAMIC_FTRACE */
15748 #endif /* CONFIG_FUNCTION_TRACER */
15749
15750@@ -147,8 +153,9 @@ ENTRY(ftrace_graph_caller)
15751
15752 MCOUNT_RESTORE_FRAME
15753
15754+ pax_force_retaddr
15755 retq
15756-END(ftrace_graph_caller)
15757+ENDPROC(ftrace_graph_caller)
15758
15759 GLOBAL(return_to_handler)
15760 subq $24, %rsp
15761@@ -164,6 +171,7 @@ GLOBAL(return_to_handler)
15762 movq 8(%rsp), %rdx
15763 movq (%rsp), %rax
15764 addq $24, %rsp
15765+ pax_force_fptr %rdi
15766 jmp *%rdi
15767 #endif
15768
15769@@ -179,6 +187,282 @@ ENTRY(native_usergs_sysret64)
15770 ENDPROC(native_usergs_sysret64)
15771 #endif /* CONFIG_PARAVIRT */
15772
15773+ .macro ljmpq sel, off
15774+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15775+ .byte 0x48; ljmp *1234f(%rip)
15776+ .pushsection .rodata
15777+ .align 16
15778+ 1234: .quad \off; .word \sel
15779+ .popsection
15780+#else
15781+ pushq $\sel
15782+ pushq $\off
15783+ lretq
15784+#endif
15785+ .endm
15786+
15787+ .macro pax_enter_kernel
15788+ pax_set_fptr_mask
15789+#ifdef CONFIG_PAX_KERNEXEC
15790+ call pax_enter_kernel
15791+#endif
15792+ .endm
15793+
15794+ .macro pax_exit_kernel
15795+#ifdef CONFIG_PAX_KERNEXEC
15796+ call pax_exit_kernel
15797+#endif
15798+ .endm
15799+
15800+#ifdef CONFIG_PAX_KERNEXEC
15801+ENTRY(pax_enter_kernel)
15802+ pushq %rdi
15803+
15804+#ifdef CONFIG_PARAVIRT
15805+ PV_SAVE_REGS(CLBR_RDI)
15806+#endif
15807+
15808+ GET_CR0_INTO_RDI
15809+ bts $16,%rdi
15810+ jnc 3f
15811+ mov %cs,%edi
15812+ cmp $__KERNEL_CS,%edi
15813+ jnz 2f
15814+1:
15815+
15816+#ifdef CONFIG_PARAVIRT
15817+ PV_RESTORE_REGS(CLBR_RDI)
15818+#endif
15819+
15820+ popq %rdi
15821+ pax_force_retaddr
15822+ retq
15823+
15824+2: ljmpq __KERNEL_CS,1f
15825+3: ljmpq __KERNEXEC_KERNEL_CS,4f
15826+4: SET_RDI_INTO_CR0
15827+ jmp 1b
15828+ENDPROC(pax_enter_kernel)
15829+
15830+ENTRY(pax_exit_kernel)
15831+ pushq %rdi
15832+
15833+#ifdef CONFIG_PARAVIRT
15834+ PV_SAVE_REGS(CLBR_RDI)
15835+#endif
15836+
15837+ mov %cs,%rdi
15838+ cmp $__KERNEXEC_KERNEL_CS,%edi
15839+ jz 2f
15840+1:
15841+
15842+#ifdef CONFIG_PARAVIRT
15843+ PV_RESTORE_REGS(CLBR_RDI);
15844+#endif
15845+
15846+ popq %rdi
15847+ pax_force_retaddr
15848+ retq
15849+
15850+2: GET_CR0_INTO_RDI
15851+ btr $16,%rdi
15852+ ljmpq __KERNEL_CS,3f
15853+3: SET_RDI_INTO_CR0
15854+ jmp 1b
15855+#ifdef CONFIG_PARAVIRT
15856+ PV_RESTORE_REGS(CLBR_RDI);
15857+#endif
15858+
15859+ popq %rdi
15860+ pax_force_retaddr
15861+ retq
15862+ENDPROC(pax_exit_kernel)
15863+#endif
15864+
15865+ .macro pax_enter_kernel_user
15866+ pax_set_fptr_mask
15867+#ifdef CONFIG_PAX_MEMORY_UDEREF
15868+ call pax_enter_kernel_user
15869+#endif
15870+ .endm
15871+
15872+ .macro pax_exit_kernel_user
15873+#ifdef CONFIG_PAX_MEMORY_UDEREF
15874+ call pax_exit_kernel_user
15875+#endif
15876+#ifdef CONFIG_PAX_RANDKSTACK
15877+ pushq %rax
15878+ call pax_randomize_kstack
15879+ popq %rax
15880+#endif
15881+ .endm
15882+
15883+#ifdef CONFIG_PAX_MEMORY_UDEREF
15884+ENTRY(pax_enter_kernel_user)
15885+ pushq %rdi
15886+ pushq %rbx
15887+
15888+#ifdef CONFIG_PARAVIRT
15889+ PV_SAVE_REGS(CLBR_RDI)
15890+#endif
15891+
15892+ GET_CR3_INTO_RDI
15893+ mov %rdi,%rbx
15894+ add $__START_KERNEL_map,%rbx
15895+ sub phys_base(%rip),%rbx
15896+
15897+#ifdef CONFIG_PARAVIRT
15898+ pushq %rdi
15899+ cmpl $0, pv_info+PARAVIRT_enabled
15900+ jz 1f
15901+ i = 0
15902+ .rept USER_PGD_PTRS
15903+ mov i*8(%rbx),%rsi
15904+ mov $0,%sil
15905+ lea i*8(%rbx),%rdi
15906+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15907+ i = i + 1
15908+ .endr
15909+ jmp 2f
15910+1:
15911+#endif
15912+
15913+ i = 0
15914+ .rept USER_PGD_PTRS
15915+ movb $0,i*8(%rbx)
15916+ i = i + 1
15917+ .endr
15918+
15919+#ifdef CONFIG_PARAVIRT
15920+2: popq %rdi
15921+#endif
15922+ SET_RDI_INTO_CR3
15923+
15924+#ifdef CONFIG_PAX_KERNEXEC
15925+ GET_CR0_INTO_RDI
15926+ bts $16,%rdi
15927+ SET_RDI_INTO_CR0
15928+#endif
15929+
15930+#ifdef CONFIG_PARAVIRT
15931+ PV_RESTORE_REGS(CLBR_RDI)
15932+#endif
15933+
15934+ popq %rbx
15935+ popq %rdi
15936+ pax_force_retaddr
15937+ retq
15938+ENDPROC(pax_enter_kernel_user)
15939+
15940+ENTRY(pax_exit_kernel_user)
15941+ push %rdi
15942+
15943+#ifdef CONFIG_PARAVIRT
15944+ pushq %rbx
15945+ PV_SAVE_REGS(CLBR_RDI)
15946+#endif
15947+
15948+#ifdef CONFIG_PAX_KERNEXEC
15949+ GET_CR0_INTO_RDI
15950+ btr $16,%rdi
15951+ SET_RDI_INTO_CR0
15952+#endif
15953+
15954+ GET_CR3_INTO_RDI
15955+ add $__START_KERNEL_map,%rdi
15956+ sub phys_base(%rip),%rdi
15957+
15958+#ifdef CONFIG_PARAVIRT
15959+ cmpl $0, pv_info+PARAVIRT_enabled
15960+ jz 1f
15961+ mov %rdi,%rbx
15962+ i = 0
15963+ .rept USER_PGD_PTRS
15964+ mov i*8(%rbx),%rsi
15965+ mov $0x67,%sil
15966+ lea i*8(%rbx),%rdi
15967+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15968+ i = i + 1
15969+ .endr
15970+ jmp 2f
15971+1:
15972+#endif
15973+
15974+ i = 0
15975+ .rept USER_PGD_PTRS
15976+ movb $0x67,i*8(%rdi)
15977+ i = i + 1
15978+ .endr
15979+
15980+#ifdef CONFIG_PARAVIRT
15981+2: PV_RESTORE_REGS(CLBR_RDI)
15982+ popq %rbx
15983+#endif
15984+
15985+ popq %rdi
15986+ pax_force_retaddr
15987+ retq
15988+ENDPROC(pax_exit_kernel_user)
15989+#endif
15990+
15991+.macro pax_erase_kstack
15992+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15993+ call pax_erase_kstack
15994+#endif
15995+.endm
15996+
15997+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15998+/*
15999+ * r11: thread_info
16000+ * rcx, rdx: can be clobbered
16001+ */
16002+ENTRY(pax_erase_kstack)
16003+ pushq %rdi
16004+ pushq %rax
16005+ pushq %r11
16006+
16007+ GET_THREAD_INFO(%r11)
16008+ mov TI_lowest_stack(%r11), %rdi
16009+ mov $-0xBEEF, %rax
16010+ std
16011+
16012+1: mov %edi, %ecx
16013+ and $THREAD_SIZE_asm - 1, %ecx
16014+ shr $3, %ecx
16015+ repne scasq
16016+ jecxz 2f
16017+
16018+ cmp $2*8, %ecx
16019+ jc 2f
16020+
16021+ mov $2*8, %ecx
16022+ repe scasq
16023+ jecxz 2f
16024+ jne 1b
16025+
16026+2: cld
16027+ mov %esp, %ecx
16028+ sub %edi, %ecx
16029+
16030+ cmp $THREAD_SIZE_asm, %rcx
16031+ jb 3f
16032+ ud2
16033+3:
16034+
16035+ shr $3, %ecx
16036+ rep stosq
16037+
16038+ mov TI_task_thread_sp0(%r11), %rdi
16039+ sub $256, %rdi
16040+ mov %rdi, TI_lowest_stack(%r11)
16041+
16042+ popq %r11
16043+ popq %rax
16044+ popq %rdi
16045+ pax_force_retaddr
16046+ ret
16047+ENDPROC(pax_erase_kstack)
16048+#endif
16049
16050 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
16051 #ifdef CONFIG_TRACE_IRQFLAGS
16052@@ -232,8 +516,8 @@ ENDPROC(native_usergs_sysret64)
16053 .endm
16054
16055 .macro UNFAKE_STACK_FRAME
16056- addq $8*6, %rsp
16057- CFI_ADJUST_CFA_OFFSET -(6*8)
16058+ addq $8*6 + ARG_SKIP, %rsp
16059+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16060 .endm
16061
16062 /*
16063@@ -320,7 +604,7 @@ ENDPROC(native_usergs_sysret64)
16064 movq %rsp, %rsi
16065
16066 leaq -RBP(%rsp),%rdi /* arg1 for handler */
16067- testl $3, CS-RBP(%rsi)
16068+ testb $3, CS-RBP(%rsi)
16069 je 1f
16070 SWAPGS
16071 /*
16072@@ -355,9 +639,10 @@ ENTRY(save_rest)
16073 movq_cfi r15, R15+16
16074 movq %r11, 8(%rsp) /* return address */
16075 FIXUP_TOP_OF_STACK %r11, 16
16076+ pax_force_retaddr
16077 ret
16078 CFI_ENDPROC
16079-END(save_rest)
16080+ENDPROC(save_rest)
16081
16082 /* save complete stack frame */
16083 .pushsection .kprobes.text, "ax"
16084@@ -386,9 +671,10 @@ ENTRY(save_paranoid)
16085 js 1f /* negative -> in kernel */
16086 SWAPGS
16087 xorl %ebx,%ebx
16088-1: ret
16089+1: pax_force_retaddr_bts
16090+ ret
16091 CFI_ENDPROC
16092-END(save_paranoid)
16093+ENDPROC(save_paranoid)
16094 .popsection
16095
16096 /*
16097@@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
16098
16099 RESTORE_REST
16100
16101- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16102+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16103 jz retint_restore_args
16104
16105 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16106@@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
16107 jmp ret_from_sys_call # go to the SYSRET fastpath
16108
16109 CFI_ENDPROC
16110-END(ret_from_fork)
16111+ENDPROC(ret_from_fork)
16112
16113 /*
16114 * System call entry. Up to 6 arguments in registers are supported.
16115@@ -456,7 +742,7 @@ END(ret_from_fork)
16116 ENTRY(system_call)
16117 CFI_STARTPROC simple
16118 CFI_SIGNAL_FRAME
16119- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16120+ CFI_DEF_CFA rsp,0
16121 CFI_REGISTER rip,rcx
16122 /*CFI_REGISTER rflags,r11*/
16123 SWAPGS_UNSAFE_STACK
16124@@ -469,16 +755,18 @@ GLOBAL(system_call_after_swapgs)
16125
16126 movq %rsp,PER_CPU_VAR(old_rsp)
16127 movq PER_CPU_VAR(kernel_stack),%rsp
16128+ SAVE_ARGS 8*6,0
16129+ pax_enter_kernel_user
16130 /*
16131 * No need to follow this irqs off/on section - it's straight
16132 * and short:
16133 */
16134 ENABLE_INTERRUPTS(CLBR_NONE)
16135- SAVE_ARGS 8,0
16136 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16137 movq %rcx,RIP-ARGOFFSET(%rsp)
16138 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16139- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16140+ GET_THREAD_INFO(%rcx)
16141+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
16142 jnz tracesys
16143 system_call_fastpath:
16144 #if __SYSCALL_MASK == ~0
16145@@ -488,7 +776,7 @@ system_call_fastpath:
16146 cmpl $__NR_syscall_max,%eax
16147 #endif
16148 ja badsys
16149- movq %r10,%rcx
16150+ movq R10-ARGOFFSET(%rsp),%rcx
16151 call *sys_call_table(,%rax,8) # XXX: rip relative
16152 movq %rax,RAX-ARGOFFSET(%rsp)
16153 /*
16154@@ -502,10 +790,13 @@ sysret_check:
16155 LOCKDEP_SYS_EXIT
16156 DISABLE_INTERRUPTS(CLBR_NONE)
16157 TRACE_IRQS_OFF
16158- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
16159+ GET_THREAD_INFO(%rcx)
16160+ movl TI_flags(%rcx),%edx
16161 andl %edi,%edx
16162 jnz sysret_careful
16163 CFI_REMEMBER_STATE
16164+ pax_exit_kernel_user
16165+ pax_erase_kstack
16166 /*
16167 * sysretq will re-enable interrupts:
16168 */
16169@@ -557,14 +848,18 @@ badsys:
16170 * jump back to the normal fast path.
16171 */
16172 auditsys:
16173- movq %r10,%r9 /* 6th arg: 4th syscall arg */
16174+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16175 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16176 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16177 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16178 movq %rax,%rsi /* 2nd arg: syscall number */
16179 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16180 call __audit_syscall_entry
16181+
16182+ pax_erase_kstack
16183+
16184 LOAD_ARGS 0 /* reload call-clobbered registers */
16185+ pax_set_fptr_mask
16186 jmp system_call_fastpath
16187
16188 /*
16189@@ -585,7 +880,7 @@ sysret_audit:
16190 /* Do syscall tracing */
16191 tracesys:
16192 #ifdef CONFIG_AUDITSYSCALL
16193- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16194+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
16195 jz auditsys
16196 #endif
16197 SAVE_REST
16198@@ -593,12 +888,16 @@ tracesys:
16199 FIXUP_TOP_OF_STACK %rdi
16200 movq %rsp,%rdi
16201 call syscall_trace_enter
16202+
16203+ pax_erase_kstack
16204+
16205 /*
16206 * Reload arg registers from stack in case ptrace changed them.
16207 * We don't reload %rax because syscall_trace_enter() returned
16208 * the value it wants us to use in the table lookup.
16209 */
16210 LOAD_ARGS ARGOFFSET, 1
16211+ pax_set_fptr_mask
16212 RESTORE_REST
16213 #if __SYSCALL_MASK == ~0
16214 cmpq $__NR_syscall_max,%rax
16215@@ -607,7 +906,7 @@ tracesys:
16216 cmpl $__NR_syscall_max,%eax
16217 #endif
16218 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16219- movq %r10,%rcx /* fixup for C */
16220+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16221 call *sys_call_table(,%rax,8)
16222 movq %rax,RAX-ARGOFFSET(%rsp)
16223 /* Use IRET because user could have changed frame */
16224@@ -628,6 +927,7 @@ GLOBAL(int_with_check)
16225 andl %edi,%edx
16226 jnz int_careful
16227 andl $~TS_COMPAT,TI_status(%rcx)
16228+ pax_erase_kstack
16229 jmp retint_swapgs
16230
16231 /* Either reschedule or signal or syscall exit tracking needed. */
16232@@ -674,7 +974,7 @@ int_restore_rest:
16233 TRACE_IRQS_OFF
16234 jmp int_with_check
16235 CFI_ENDPROC
16236-END(system_call)
16237+ENDPROC(system_call)
16238
16239 /*
16240 * Certain special system calls that need to save a complete full stack frame.
16241@@ -690,7 +990,7 @@ ENTRY(\label)
16242 call \func
16243 jmp ptregscall_common
16244 CFI_ENDPROC
16245-END(\label)
16246+ENDPROC(\label)
16247 .endm
16248
16249 PTREGSCALL stub_clone, sys_clone, %r8
16250@@ -708,9 +1008,10 @@ ENTRY(ptregscall_common)
16251 movq_cfi_restore R12+8, r12
16252 movq_cfi_restore RBP+8, rbp
16253 movq_cfi_restore RBX+8, rbx
16254+ pax_force_retaddr
16255 ret $REST_SKIP /* pop extended registers */
16256 CFI_ENDPROC
16257-END(ptregscall_common)
16258+ENDPROC(ptregscall_common)
16259
16260 ENTRY(stub_execve)
16261 CFI_STARTPROC
16262@@ -725,7 +1026,7 @@ ENTRY(stub_execve)
16263 RESTORE_REST
16264 jmp int_ret_from_sys_call
16265 CFI_ENDPROC
16266-END(stub_execve)
16267+ENDPROC(stub_execve)
16268
16269 /*
16270 * sigreturn is special because it needs to restore all registers on return.
16271@@ -743,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
16272 RESTORE_REST
16273 jmp int_ret_from_sys_call
16274 CFI_ENDPROC
16275-END(stub_rt_sigreturn)
16276+ENDPROC(stub_rt_sigreturn)
16277
16278 #ifdef CONFIG_X86_X32_ABI
16279 PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
16280@@ -812,7 +1113,7 @@ vector=vector+1
16281 2: jmp common_interrupt
16282 .endr
16283 CFI_ENDPROC
16284-END(irq_entries_start)
16285+ENDPROC(irq_entries_start)
16286
16287 .previous
16288 END(interrupt)
16289@@ -832,6 +1133,16 @@ END(interrupt)
16290 subq $ORIG_RAX-RBP, %rsp
16291 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
16292 SAVE_ARGS_IRQ
16293+#ifdef CONFIG_PAX_MEMORY_UDEREF
16294+ testb $3, CS(%rdi)
16295+ jnz 1f
16296+ pax_enter_kernel
16297+ jmp 2f
16298+1: pax_enter_kernel_user
16299+2:
16300+#else
16301+ pax_enter_kernel
16302+#endif
16303 call \func
16304 .endm
16305
16306@@ -863,7 +1174,7 @@ ret_from_intr:
16307
16308 exit_intr:
16309 GET_THREAD_INFO(%rcx)
16310- testl $3,CS-ARGOFFSET(%rsp)
16311+ testb $3,CS-ARGOFFSET(%rsp)
16312 je retint_kernel
16313
16314 /* Interrupt came from user space */
16315@@ -885,12 +1196,15 @@ retint_swapgs: /* return to user-space */
16316 * The iretq could re-enable interrupts:
16317 */
16318 DISABLE_INTERRUPTS(CLBR_ANY)
16319+ pax_exit_kernel_user
16320 TRACE_IRQS_IRETQ
16321 SWAPGS
16322 jmp restore_args
16323
16324 retint_restore_args: /* return to kernel space */
16325 DISABLE_INTERRUPTS(CLBR_ANY)
16326+ pax_exit_kernel
16327+ pax_force_retaddr RIP-ARGOFFSET
16328 /*
16329 * The iretq could re-enable interrupts:
16330 */
16331@@ -979,7 +1293,7 @@ ENTRY(retint_kernel)
16332 #endif
16333
16334 CFI_ENDPROC
16335-END(common_interrupt)
16336+ENDPROC(common_interrupt)
16337 /*
16338 * End of kprobes section
16339 */
16340@@ -996,7 +1310,7 @@ ENTRY(\sym)
16341 interrupt \do_sym
16342 jmp ret_from_intr
16343 CFI_ENDPROC
16344-END(\sym)
16345+ENDPROC(\sym)
16346 .endm
16347
16348 #ifdef CONFIG_SMP
16349@@ -1069,12 +1383,22 @@ ENTRY(\sym)
16350 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16351 call error_entry
16352 DEFAULT_FRAME 0
16353+#ifdef CONFIG_PAX_MEMORY_UDEREF
16354+ testb $3, CS(%rsp)
16355+ jnz 1f
16356+ pax_enter_kernel
16357+ jmp 2f
16358+1: pax_enter_kernel_user
16359+2:
16360+#else
16361+ pax_enter_kernel
16362+#endif
16363 movq %rsp,%rdi /* pt_regs pointer */
16364 xorl %esi,%esi /* no error code */
16365 call \do_sym
16366 jmp error_exit /* %ebx: no swapgs flag */
16367 CFI_ENDPROC
16368-END(\sym)
16369+ENDPROC(\sym)
16370 .endm
16371
16372 .macro paranoidzeroentry sym do_sym
16373@@ -1086,15 +1410,25 @@ ENTRY(\sym)
16374 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16375 call save_paranoid
16376 TRACE_IRQS_OFF
16377+#ifdef CONFIG_PAX_MEMORY_UDEREF
16378+ testb $3, CS(%rsp)
16379+ jnz 1f
16380+ pax_enter_kernel
16381+ jmp 2f
16382+1: pax_enter_kernel_user
16383+2:
16384+#else
16385+ pax_enter_kernel
16386+#endif
16387 movq %rsp,%rdi /* pt_regs pointer */
16388 xorl %esi,%esi /* no error code */
16389 call \do_sym
16390 jmp paranoid_exit /* %ebx: no swapgs flag */
16391 CFI_ENDPROC
16392-END(\sym)
16393+ENDPROC(\sym)
16394 .endm
16395
16396-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
16397+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
16398 .macro paranoidzeroentry_ist sym do_sym ist
16399 ENTRY(\sym)
16400 INTR_FRAME
16401@@ -1104,14 +1438,30 @@ ENTRY(\sym)
16402 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16403 call save_paranoid
16404 TRACE_IRQS_OFF
16405+#ifdef CONFIG_PAX_MEMORY_UDEREF
16406+ testb $3, CS(%rsp)
16407+ jnz 1f
16408+ pax_enter_kernel
16409+ jmp 2f
16410+1: pax_enter_kernel_user
16411+2:
16412+#else
16413+ pax_enter_kernel
16414+#endif
16415 movq %rsp,%rdi /* pt_regs pointer */
16416 xorl %esi,%esi /* no error code */
16417+#ifdef CONFIG_SMP
16418+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
16419+ lea init_tss(%r12), %r12
16420+#else
16421+ lea init_tss(%rip), %r12
16422+#endif
16423 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16424 call \do_sym
16425 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16426 jmp paranoid_exit /* %ebx: no swapgs flag */
16427 CFI_ENDPROC
16428-END(\sym)
16429+ENDPROC(\sym)
16430 .endm
16431
16432 .macro errorentry sym do_sym
16433@@ -1122,13 +1472,23 @@ ENTRY(\sym)
16434 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16435 call error_entry
16436 DEFAULT_FRAME 0
16437+#ifdef CONFIG_PAX_MEMORY_UDEREF
16438+ testb $3, CS(%rsp)
16439+ jnz 1f
16440+ pax_enter_kernel
16441+ jmp 2f
16442+1: pax_enter_kernel_user
16443+2:
16444+#else
16445+ pax_enter_kernel
16446+#endif
16447 movq %rsp,%rdi /* pt_regs pointer */
16448 movq ORIG_RAX(%rsp),%rsi /* get error code */
16449 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16450 call \do_sym
16451 jmp error_exit /* %ebx: no swapgs flag */
16452 CFI_ENDPROC
16453-END(\sym)
16454+ENDPROC(\sym)
16455 .endm
16456
16457 /* error code is on the stack already */
16458@@ -1141,13 +1501,23 @@ ENTRY(\sym)
16459 call save_paranoid
16460 DEFAULT_FRAME 0
16461 TRACE_IRQS_OFF
16462+#ifdef CONFIG_PAX_MEMORY_UDEREF
16463+ testb $3, CS(%rsp)
16464+ jnz 1f
16465+ pax_enter_kernel
16466+ jmp 2f
16467+1: pax_enter_kernel_user
16468+2:
16469+#else
16470+ pax_enter_kernel
16471+#endif
16472 movq %rsp,%rdi /* pt_regs pointer */
16473 movq ORIG_RAX(%rsp),%rsi /* get error code */
16474 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16475 call \do_sym
16476 jmp paranoid_exit /* %ebx: no swapgs flag */
16477 CFI_ENDPROC
16478-END(\sym)
16479+ENDPROC(\sym)
16480 .endm
16481
16482 zeroentry divide_error do_divide_error
16483@@ -1177,9 +1547,10 @@ gs_change:
16484 2: mfence /* workaround */
16485 SWAPGS
16486 popfq_cfi
16487+ pax_force_retaddr
16488 ret
16489 CFI_ENDPROC
16490-END(native_load_gs_index)
16491+ENDPROC(native_load_gs_index)
16492
16493 .section __ex_table,"a"
16494 .align 8
16495@@ -1201,13 +1572,14 @@ ENTRY(kernel_thread_helper)
16496 * Here we are in the child and the registers are set as they were
16497 * at kernel_thread() invocation in the parent.
16498 */
16499+ pax_force_fptr %rsi
16500 call *%rsi
16501 # exit
16502 mov %eax, %edi
16503 call do_exit
16504 ud2 # padding for call trace
16505 CFI_ENDPROC
16506-END(kernel_thread_helper)
16507+ENDPROC(kernel_thread_helper)
16508
16509 /*
16510 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16511@@ -1234,11 +1606,11 @@ ENTRY(kernel_execve)
16512 RESTORE_REST
16513 testq %rax,%rax
16514 je int_ret_from_sys_call
16515- RESTORE_ARGS
16516 UNFAKE_STACK_FRAME
16517+ pax_force_retaddr
16518 ret
16519 CFI_ENDPROC
16520-END(kernel_execve)
16521+ENDPROC(kernel_execve)
16522
16523 /* Call softirq on interrupt stack. Interrupts are off. */
16524 ENTRY(call_softirq)
16525@@ -1256,9 +1628,10 @@ ENTRY(call_softirq)
16526 CFI_DEF_CFA_REGISTER rsp
16527 CFI_ADJUST_CFA_OFFSET -8
16528 decl PER_CPU_VAR(irq_count)
16529+ pax_force_retaddr
16530 ret
16531 CFI_ENDPROC
16532-END(call_softirq)
16533+ENDPROC(call_softirq)
16534
16535 #ifdef CONFIG_XEN
16536 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16537@@ -1296,7 +1669,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16538 decl PER_CPU_VAR(irq_count)
16539 jmp error_exit
16540 CFI_ENDPROC
16541-END(xen_do_hypervisor_callback)
16542+ENDPROC(xen_do_hypervisor_callback)
16543
16544 /*
16545 * Hypervisor uses this for application faults while it executes.
16546@@ -1355,7 +1728,7 @@ ENTRY(xen_failsafe_callback)
16547 SAVE_ALL
16548 jmp error_exit
16549 CFI_ENDPROC
16550-END(xen_failsafe_callback)
16551+ENDPROC(xen_failsafe_callback)
16552
16553 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
16554 xen_hvm_callback_vector xen_evtchn_do_upcall
16555@@ -1404,16 +1777,31 @@ ENTRY(paranoid_exit)
16556 TRACE_IRQS_OFF
16557 testl %ebx,%ebx /* swapgs needed? */
16558 jnz paranoid_restore
16559- testl $3,CS(%rsp)
16560+ testb $3,CS(%rsp)
16561 jnz paranoid_userspace
16562+#ifdef CONFIG_PAX_MEMORY_UDEREF
16563+ pax_exit_kernel
16564+ TRACE_IRQS_IRETQ 0
16565+ SWAPGS_UNSAFE_STACK
16566+ RESTORE_ALL 8
16567+ pax_force_retaddr_bts
16568+ jmp irq_return
16569+#endif
16570 paranoid_swapgs:
16571+#ifdef CONFIG_PAX_MEMORY_UDEREF
16572+ pax_exit_kernel_user
16573+#else
16574+ pax_exit_kernel
16575+#endif
16576 TRACE_IRQS_IRETQ 0
16577 SWAPGS_UNSAFE_STACK
16578 RESTORE_ALL 8
16579 jmp irq_return
16580 paranoid_restore:
16581+ pax_exit_kernel
16582 TRACE_IRQS_IRETQ 0
16583 RESTORE_ALL 8
16584+ pax_force_retaddr_bts
16585 jmp irq_return
16586 paranoid_userspace:
16587 GET_THREAD_INFO(%rcx)
16588@@ -1442,7 +1830,7 @@ paranoid_schedule:
16589 TRACE_IRQS_OFF
16590 jmp paranoid_userspace
16591 CFI_ENDPROC
16592-END(paranoid_exit)
16593+ENDPROC(paranoid_exit)
16594
16595 /*
16596 * Exception entry point. This expects an error code/orig_rax on the stack.
16597@@ -1469,12 +1857,13 @@ ENTRY(error_entry)
16598 movq_cfi r14, R14+8
16599 movq_cfi r15, R15+8
16600 xorl %ebx,%ebx
16601- testl $3,CS+8(%rsp)
16602+ testb $3,CS+8(%rsp)
16603 je error_kernelspace
16604 error_swapgs:
16605 SWAPGS
16606 error_sti:
16607 TRACE_IRQS_OFF
16608+ pax_force_retaddr_bts
16609 ret
16610
16611 /*
16612@@ -1501,7 +1890,7 @@ bstep_iret:
16613 movq %rcx,RIP+8(%rsp)
16614 jmp error_swapgs
16615 CFI_ENDPROC
16616-END(error_entry)
16617+ENDPROC(error_entry)
16618
16619
16620 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16621@@ -1521,7 +1910,7 @@ ENTRY(error_exit)
16622 jnz retint_careful
16623 jmp retint_swapgs
16624 CFI_ENDPROC
16625-END(error_exit)
16626+ENDPROC(error_exit)
16627
16628 /*
16629 * Test if a given stack is an NMI stack or not.
16630@@ -1579,9 +1968,11 @@ ENTRY(nmi)
16631 * If %cs was not the kernel segment, then the NMI triggered in user
16632 * space, which means it is definitely not nested.
16633 */
16634+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
16635+ je 1f
16636 cmpl $__KERNEL_CS, 16(%rsp)
16637 jne first_nmi
16638-
16639+1:
16640 /*
16641 * Check the special variable on the stack to see if NMIs are
16642 * executing.
16643@@ -1728,6 +2119,16 @@ end_repeat_nmi:
16644 */
16645 call save_paranoid
16646 DEFAULT_FRAME 0
16647+#ifdef CONFIG_PAX_MEMORY_UDEREF
16648+ testb $3, CS(%rsp)
16649+ jnz 1f
16650+ pax_enter_kernel
16651+ jmp 2f
16652+1: pax_enter_kernel_user
16653+2:
16654+#else
16655+ pax_enter_kernel
16656+#endif
16657 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16658 movq %rsp,%rdi
16659 movq $-1,%rsi
16660@@ -1735,21 +2136,32 @@ end_repeat_nmi:
16661 testl %ebx,%ebx /* swapgs needed? */
16662 jnz nmi_restore
16663 nmi_swapgs:
16664+#ifdef CONFIG_PAX_MEMORY_UDEREF
16665+ pax_exit_kernel_user
16666+#else
16667+ pax_exit_kernel
16668+#endif
16669 SWAPGS_UNSAFE_STACK
16670+ RESTORE_ALL 8
16671+ /* Clear the NMI executing stack variable */
16672+ movq $0, 10*8(%rsp)
16673+ jmp irq_return
16674 nmi_restore:
16675+ pax_exit_kernel
16676 RESTORE_ALL 8
16677+ pax_force_retaddr_bts
16678 /* Clear the NMI executing stack variable */
16679 movq $0, 10*8(%rsp)
16680 jmp irq_return
16681 CFI_ENDPROC
16682-END(nmi)
16683+ENDPROC(nmi)
16684
16685 ENTRY(ignore_sysret)
16686 CFI_STARTPROC
16687 mov $-ENOSYS,%eax
16688 sysret
16689 CFI_ENDPROC
16690-END(ignore_sysret)
16691+ENDPROC(ignore_sysret)
16692
16693 /*
16694 * End of kprobes section
16695diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16696index c9a281f..ce2f317 100644
16697--- a/arch/x86/kernel/ftrace.c
16698+++ b/arch/x86/kernel/ftrace.c
16699@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16700 static const void *mod_code_newcode; /* holds the text to write to the IP */
16701
16702 static unsigned nmi_wait_count;
16703-static atomic_t nmi_update_count = ATOMIC_INIT(0);
16704+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16705
16706 int ftrace_arch_read_dyn_info(char *buf, int size)
16707 {
16708@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16709
16710 r = snprintf(buf, size, "%u %u",
16711 nmi_wait_count,
16712- atomic_read(&nmi_update_count));
16713+ atomic_read_unchecked(&nmi_update_count));
16714 return r;
16715 }
16716
16717@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
16718
16719 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16720 smp_rmb();
16721+ pax_open_kernel();
16722 ftrace_mod_code();
16723- atomic_inc(&nmi_update_count);
16724+ pax_close_kernel();
16725+ atomic_inc_unchecked(&nmi_update_count);
16726 }
16727 /* Must have previous changes seen before executions */
16728 smp_mb();
16729@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
16730 {
16731 unsigned char replaced[MCOUNT_INSN_SIZE];
16732
16733+ ip = ktla_ktva(ip);
16734+
16735 /*
16736 * Note: Due to modules and __init, code can
16737 * disappear and change, we need to protect against faulting
16738@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16739 unsigned char old[MCOUNT_INSN_SIZE], *new;
16740 int ret;
16741
16742- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16743+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16744 new = ftrace_call_replace(ip, (unsigned long)func);
16745 ret = ftrace_modify_code(ip, old, new);
16746
16747@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16748 {
16749 unsigned char code[MCOUNT_INSN_SIZE];
16750
16751+ ip = ktla_ktva(ip);
16752+
16753 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16754 return -EFAULT;
16755
16756diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16757index 51ff186..9e77418 100644
16758--- a/arch/x86/kernel/head32.c
16759+++ b/arch/x86/kernel/head32.c
16760@@ -19,6 +19,7 @@
16761 #include <asm/io_apic.h>
16762 #include <asm/bios_ebda.h>
16763 #include <asm/tlbflush.h>
16764+#include <asm/boot.h>
16765
16766 static void __init i386_default_early_setup(void)
16767 {
16768@@ -31,8 +32,7 @@ static void __init i386_default_early_setup(void)
16769
16770 void __init i386_start_kernel(void)
16771 {
16772- memblock_reserve(__pa_symbol(&_text),
16773- __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
16774+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
16775
16776 #ifdef CONFIG_BLK_DEV_INITRD
16777 /* Reserve INITRD */
16778diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16779index ce0be7c..c41476e 100644
16780--- a/arch/x86/kernel/head_32.S
16781+++ b/arch/x86/kernel/head_32.S
16782@@ -25,6 +25,12 @@
16783 /* Physical address */
16784 #define pa(X) ((X) - __PAGE_OFFSET)
16785
16786+#ifdef CONFIG_PAX_KERNEXEC
16787+#define ta(X) (X)
16788+#else
16789+#define ta(X) ((X) - __PAGE_OFFSET)
16790+#endif
16791+
16792 /*
16793 * References to members of the new_cpu_data structure.
16794 */
16795@@ -54,11 +60,7 @@
16796 * and small than max_low_pfn, otherwise will waste some page table entries
16797 */
16798
16799-#if PTRS_PER_PMD > 1
16800-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16801-#else
16802-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16803-#endif
16804+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16805
16806 /* Number of possible pages in the lowmem region */
16807 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
16808@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
16809 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16810
16811 /*
16812+ * Real beginning of normal "text" segment
16813+ */
16814+ENTRY(stext)
16815+ENTRY(_stext)
16816+
16817+/*
16818 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16819 * %esi points to the real-mode code as a 32-bit pointer.
16820 * CS and DS must be 4 GB flat segments, but we don't depend on
16821@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16822 * can.
16823 */
16824 __HEAD
16825+
16826+#ifdef CONFIG_PAX_KERNEXEC
16827+ jmp startup_32
16828+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16829+.fill PAGE_SIZE-5,1,0xcc
16830+#endif
16831+
16832 ENTRY(startup_32)
16833 movl pa(stack_start),%ecx
16834
16835@@ -105,6 +120,57 @@ ENTRY(startup_32)
16836 2:
16837 leal -__PAGE_OFFSET(%ecx),%esp
16838
16839+#ifdef CONFIG_SMP
16840+ movl $pa(cpu_gdt_table),%edi
16841+ movl $__per_cpu_load,%eax
16842+ movw %ax,__KERNEL_PERCPU + 2(%edi)
16843+ rorl $16,%eax
16844+ movb %al,__KERNEL_PERCPU + 4(%edi)
16845+ movb %ah,__KERNEL_PERCPU + 7(%edi)
16846+ movl $__per_cpu_end - 1,%eax
16847+ subl $__per_cpu_start,%eax
16848+ movw %ax,__KERNEL_PERCPU + 0(%edi)
16849+#endif
16850+
16851+#ifdef CONFIG_PAX_MEMORY_UDEREF
16852+ movl $NR_CPUS,%ecx
16853+ movl $pa(cpu_gdt_table),%edi
16854+1:
16855+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16856+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16857+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16858+ addl $PAGE_SIZE_asm,%edi
16859+ loop 1b
16860+#endif
16861+
16862+#ifdef CONFIG_PAX_KERNEXEC
16863+ movl $pa(boot_gdt),%edi
16864+ movl $__LOAD_PHYSICAL_ADDR,%eax
16865+ movw %ax,__BOOT_CS + 2(%edi)
16866+ rorl $16,%eax
16867+ movb %al,__BOOT_CS + 4(%edi)
16868+ movb %ah,__BOOT_CS + 7(%edi)
16869+ rorl $16,%eax
16870+
16871+ ljmp $(__BOOT_CS),$1f
16872+1:
16873+
16874+ movl $NR_CPUS,%ecx
16875+ movl $pa(cpu_gdt_table),%edi
16876+ addl $__PAGE_OFFSET,%eax
16877+1:
16878+ movw %ax,__KERNEL_CS + 2(%edi)
16879+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16880+ rorl $16,%eax
16881+ movb %al,__KERNEL_CS + 4(%edi)
16882+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16883+ movb %ah,__KERNEL_CS + 7(%edi)
16884+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16885+ rorl $16,%eax
16886+ addl $PAGE_SIZE_asm,%edi
16887+ loop 1b
16888+#endif
16889+
16890 /*
16891 * Clear BSS first so that there are no surprises...
16892 */
16893@@ -195,8 +261,11 @@ ENTRY(startup_32)
16894 movl %eax, pa(max_pfn_mapped)
16895
16896 /* Do early initialization of the fixmap area */
16897- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16898- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
16899+#ifdef CONFIG_COMPAT_VDSO
16900+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
16901+#else
16902+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
16903+#endif
16904 #else /* Not PAE */
16905
16906 page_pde_offset = (__PAGE_OFFSET >> 20);
16907@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16908 movl %eax, pa(max_pfn_mapped)
16909
16910 /* Do early initialization of the fixmap area */
16911- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16912- movl %eax,pa(initial_page_table+0xffc)
16913+#ifdef CONFIG_COMPAT_VDSO
16914+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
16915+#else
16916+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
16917+#endif
16918 #endif
16919
16920 #ifdef CONFIG_PARAVIRT
16921@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16922 cmpl $num_subarch_entries, %eax
16923 jae bad_subarch
16924
16925- movl pa(subarch_entries)(,%eax,4), %eax
16926- subl $__PAGE_OFFSET, %eax
16927- jmp *%eax
16928+ jmp *pa(subarch_entries)(,%eax,4)
16929
16930 bad_subarch:
16931 WEAK(lguest_entry)
16932@@ -255,10 +325,10 @@ WEAK(xen_entry)
16933 __INITDATA
16934
16935 subarch_entries:
16936- .long default_entry /* normal x86/PC */
16937- .long lguest_entry /* lguest hypervisor */
16938- .long xen_entry /* Xen hypervisor */
16939- .long default_entry /* Moorestown MID */
16940+ .long ta(default_entry) /* normal x86/PC */
16941+ .long ta(lguest_entry) /* lguest hypervisor */
16942+ .long ta(xen_entry) /* Xen hypervisor */
16943+ .long ta(default_entry) /* Moorestown MID */
16944 num_subarch_entries = (. - subarch_entries) / 4
16945 .previous
16946 #else
16947@@ -312,6 +382,7 @@ default_entry:
16948 orl %edx,%eax
16949 movl %eax,%cr4
16950
16951+#ifdef CONFIG_X86_PAE
16952 testb $X86_CR4_PAE, %al # check if PAE is enabled
16953 jz 6f
16954
16955@@ -340,6 +411,9 @@ default_entry:
16956 /* Make changes effective */
16957 wrmsr
16958
16959+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16960+#endif
16961+
16962 6:
16963
16964 /*
16965@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
16966 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16967 movl %eax,%ss # after changing gdt.
16968
16969- movl $(__USER_DS),%eax # DS/ES contains default USER segment
16970+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
16971 movl %eax,%ds
16972 movl %eax,%es
16973
16974@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
16975 */
16976 cmpb $0,ready
16977 jne 1f
16978- movl $gdt_page,%eax
16979+ movl $cpu_gdt_table,%eax
16980 movl $stack_canary,%ecx
16981+#ifdef CONFIG_SMP
16982+ addl $__per_cpu_load,%ecx
16983+#endif
16984 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
16985 shrl $16, %ecx
16986 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
16987 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
16988 1:
16989-#endif
16990 movl $(__KERNEL_STACK_CANARY),%eax
16991+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16992+ movl $(__USER_DS),%eax
16993+#else
16994+ xorl %eax,%eax
16995+#endif
16996 movl %eax,%gs
16997
16998 xorl %eax,%eax # Clear LDT
16999@@ -558,22 +639,22 @@ early_page_fault:
17000 jmp early_fault
17001
17002 early_fault:
17003- cld
17004 #ifdef CONFIG_PRINTK
17005+ cmpl $1,%ss:early_recursion_flag
17006+ je hlt_loop
17007+ incl %ss:early_recursion_flag
17008+ cld
17009 pusha
17010 movl $(__KERNEL_DS),%eax
17011 movl %eax,%ds
17012 movl %eax,%es
17013- cmpl $2,early_recursion_flag
17014- je hlt_loop
17015- incl early_recursion_flag
17016 movl %cr2,%eax
17017 pushl %eax
17018 pushl %edx /* trapno */
17019 pushl $fault_msg
17020 call printk
17021+; call dump_stack
17022 #endif
17023- call dump_stack
17024 hlt_loop:
17025 hlt
17026 jmp hlt_loop
17027@@ -581,8 +662,11 @@ hlt_loop:
17028 /* This is the default interrupt "handler" :-) */
17029 ALIGN
17030 ignore_int:
17031- cld
17032 #ifdef CONFIG_PRINTK
17033+ cmpl $2,%ss:early_recursion_flag
17034+ je hlt_loop
17035+ incl %ss:early_recursion_flag
17036+ cld
17037 pushl %eax
17038 pushl %ecx
17039 pushl %edx
17040@@ -591,9 +675,6 @@ ignore_int:
17041 movl $(__KERNEL_DS),%eax
17042 movl %eax,%ds
17043 movl %eax,%es
17044- cmpl $2,early_recursion_flag
17045- je hlt_loop
17046- incl early_recursion_flag
17047 pushl 16(%esp)
17048 pushl 24(%esp)
17049 pushl 32(%esp)
17050@@ -622,29 +703,43 @@ ENTRY(initial_code)
17051 /*
17052 * BSS section
17053 */
17054-__PAGE_ALIGNED_BSS
17055- .align PAGE_SIZE
17056 #ifdef CONFIG_X86_PAE
17057+.section .initial_pg_pmd,"a",@progbits
17058 initial_pg_pmd:
17059 .fill 1024*KPMDS,4,0
17060 #else
17061+.section .initial_page_table,"a",@progbits
17062 ENTRY(initial_page_table)
17063 .fill 1024,4,0
17064 #endif
17065+.section .initial_pg_fixmap,"a",@progbits
17066 initial_pg_fixmap:
17067 .fill 1024,4,0
17068+.section .empty_zero_page,"a",@progbits
17069 ENTRY(empty_zero_page)
17070 .fill 4096,1,0
17071+.section .swapper_pg_dir,"a",@progbits
17072 ENTRY(swapper_pg_dir)
17073+#ifdef CONFIG_X86_PAE
17074+ .fill 4,8,0
17075+#else
17076 .fill 1024,4,0
17077+#endif
17078+
17079+/*
17080+ * The IDT has to be page-aligned to simplify the Pentium
17081+ * F0 0F bug workaround.. We have a special link segment
17082+ * for this.
17083+ */
17084+.section .idt,"a",@progbits
17085+ENTRY(idt_table)
17086+ .fill 256,8,0
17087
17088 /*
17089 * This starts the data section.
17090 */
17091 #ifdef CONFIG_X86_PAE
17092-__PAGE_ALIGNED_DATA
17093- /* Page-aligned for the benefit of paravirt? */
17094- .align PAGE_SIZE
17095+.section .initial_page_table,"a",@progbits
17096 ENTRY(initial_page_table)
17097 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17098 # if KPMDS == 3
17099@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
17100 # error "Kernel PMDs should be 1, 2 or 3"
17101 # endif
17102 .align PAGE_SIZE /* needs to be page-sized too */
17103+
17104+#ifdef CONFIG_PAX_PER_CPU_PGD
17105+ENTRY(cpu_pgd)
17106+ .rept NR_CPUS
17107+ .fill 4,8,0
17108+ .endr
17109+#endif
17110+
17111 #endif
17112
17113 .data
17114 .balign 4
17115 ENTRY(stack_start)
17116- .long init_thread_union+THREAD_SIZE
17117+ .long init_thread_union+THREAD_SIZE-8
17118
17119+ready: .byte 0
17120+
17121+.section .rodata,"a",@progbits
17122 early_recursion_flag:
17123 .long 0
17124
17125-ready: .byte 0
17126-
17127 int_msg:
17128 .asciz "Unknown interrupt or fault at: %p %p %p\n"
17129
17130@@ -707,7 +811,7 @@ fault_msg:
17131 .word 0 # 32 bit align gdt_desc.address
17132 boot_gdt_descr:
17133 .word __BOOT_DS+7
17134- .long boot_gdt - __PAGE_OFFSET
17135+ .long pa(boot_gdt)
17136
17137 .word 0 # 32-bit align idt_desc.address
17138 idt_descr:
17139@@ -718,7 +822,7 @@ idt_descr:
17140 .word 0 # 32 bit align gdt_desc.address
17141 ENTRY(early_gdt_descr)
17142 .word GDT_ENTRIES*8-1
17143- .long gdt_page /* Overwritten for secondary CPUs */
17144+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
17145
17146 /*
17147 * The boot_gdt must mirror the equivalent in setup.S and is
17148@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
17149 .align L1_CACHE_BYTES
17150 ENTRY(boot_gdt)
17151 .fill GDT_ENTRY_BOOT_CS,8,0
17152- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17153- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17154+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17155+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17156+
17157+ .align PAGE_SIZE_asm
17158+ENTRY(cpu_gdt_table)
17159+ .rept NR_CPUS
17160+ .quad 0x0000000000000000 /* NULL descriptor */
17161+ .quad 0x0000000000000000 /* 0x0b reserved */
17162+ .quad 0x0000000000000000 /* 0x13 reserved */
17163+ .quad 0x0000000000000000 /* 0x1b reserved */
17164+
17165+#ifdef CONFIG_PAX_KERNEXEC
17166+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17167+#else
17168+ .quad 0x0000000000000000 /* 0x20 unused */
17169+#endif
17170+
17171+ .quad 0x0000000000000000 /* 0x28 unused */
17172+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17173+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17174+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17175+ .quad 0x0000000000000000 /* 0x4b reserved */
17176+ .quad 0x0000000000000000 /* 0x53 reserved */
17177+ .quad 0x0000000000000000 /* 0x5b reserved */
17178+
17179+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17180+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17181+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17182+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17183+
17184+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17185+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17186+
17187+ /*
17188+ * Segments used for calling PnP BIOS have byte granularity.
17189+ * The code segments and data segments have fixed 64k limits,
17190+ * the transfer segment sizes are set at run time.
17191+ */
17192+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
17193+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
17194+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
17195+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
17196+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
17197+
17198+ /*
17199+ * The APM segments have byte granularity and their bases
17200+ * are set at run time. All have 64k limits.
17201+ */
17202+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17203+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17204+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
17205+
17206+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17207+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17208+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17209+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17210+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17211+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17212+
17213+ /* Be sure this is zeroed to avoid false validations in Xen */
17214+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17215+ .endr
17216diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17217index 40f4eb3..6d24d9d 100644
17218--- a/arch/x86/kernel/head_64.S
17219+++ b/arch/x86/kernel/head_64.S
17220@@ -19,6 +19,8 @@
17221 #include <asm/cache.h>
17222 #include <asm/processor-flags.h>
17223 #include <asm/percpu.h>
17224+#include <asm/cpufeature.h>
17225+#include <asm/alternative-asm.h>
17226
17227 #ifdef CONFIG_PARAVIRT
17228 #include <asm/asm-offsets.h>
17229@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17230 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17231 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17232 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17233+L4_VMALLOC_START = pgd_index(VMALLOC_START)
17234+L3_VMALLOC_START = pud_index(VMALLOC_START)
17235+L4_VMALLOC_END = pgd_index(VMALLOC_END)
17236+L3_VMALLOC_END = pud_index(VMALLOC_END)
17237+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17238+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17239
17240 .text
17241 __HEAD
17242@@ -85,35 +93,23 @@ startup_64:
17243 */
17244 addq %rbp, init_level4_pgt + 0(%rip)
17245 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17246+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17247+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17248+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17249 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17250
17251 addq %rbp, level3_ident_pgt + 0(%rip)
17252+#ifndef CONFIG_XEN
17253+ addq %rbp, level3_ident_pgt + 8(%rip)
17254+#endif
17255
17256- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17257- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17258+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17259+
17260+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17261+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17262
17263 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17264-
17265- /* Add an Identity mapping if I am above 1G */
17266- leaq _text(%rip), %rdi
17267- andq $PMD_PAGE_MASK, %rdi
17268-
17269- movq %rdi, %rax
17270- shrq $PUD_SHIFT, %rax
17271- andq $(PTRS_PER_PUD - 1), %rax
17272- jz ident_complete
17273-
17274- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17275- leaq level3_ident_pgt(%rip), %rbx
17276- movq %rdx, 0(%rbx, %rax, 8)
17277-
17278- movq %rdi, %rax
17279- shrq $PMD_SHIFT, %rax
17280- andq $(PTRS_PER_PMD - 1), %rax
17281- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17282- leaq level2_spare_pgt(%rip), %rbx
17283- movq %rdx, 0(%rbx, %rax, 8)
17284-ident_complete:
17285+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17286
17287 /*
17288 * Fixup the kernel text+data virtual addresses. Note that
17289@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
17290 * after the boot processor executes this code.
17291 */
17292
17293- /* Enable PAE mode and PGE */
17294- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17295+ /* Enable PAE mode and PSE/PGE */
17296+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17297 movq %rax, %cr4
17298
17299 /* Setup early boot stage 4 level pagetables. */
17300@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
17301 movl $MSR_EFER, %ecx
17302 rdmsr
17303 btsl $_EFER_SCE, %eax /* Enable System Call */
17304- btl $20,%edi /* No Execute supported? */
17305+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17306 jnc 1f
17307 btsl $_EFER_NX, %eax
17308+ leaq init_level4_pgt(%rip), %rdi
17309+#ifndef CONFIG_EFI
17310+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17311+#endif
17312+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17313+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17314+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17315+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
17316 1: wrmsr /* Make changes effective */
17317
17318 /* Setup cr0 */
17319@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
17320 * jump. In addition we need to ensure %cs is set so we make this
17321 * a far return.
17322 */
17323+ pax_set_fptr_mask
17324 movq initial_code(%rip),%rax
17325 pushq $0 # fake return address to stop unwinder
17326 pushq $__KERNEL_CS # set correct cs
17327@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
17328 bad_address:
17329 jmp bad_address
17330
17331- .section ".init.text","ax"
17332+ __INIT
17333 #ifdef CONFIG_EARLY_PRINTK
17334 .globl early_idt_handlers
17335 early_idt_handlers:
17336@@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
17337 #endif /* EARLY_PRINTK */
17338 1: hlt
17339 jmp 1b
17340+ .previous
17341
17342 #ifdef CONFIG_EARLY_PRINTK
17343+ __INITDATA
17344 early_recursion_flag:
17345 .long 0
17346+ .previous
17347
17348+ .section .rodata,"a",@progbits
17349 early_idt_msg:
17350 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17351 early_idt_ripmsg:
17352 .asciz "RIP %s\n"
17353+ .previous
17354 #endif /* CONFIG_EARLY_PRINTK */
17355- .previous
17356
17357+ .section .rodata,"a",@progbits
17358 #define NEXT_PAGE(name) \
17359 .balign PAGE_SIZE; \
17360 ENTRY(name)
17361@@ -338,7 +348,6 @@ ENTRY(name)
17362 i = i + 1 ; \
17363 .endr
17364
17365- .data
17366 /*
17367 * This default setting generates an ident mapping at address 0x100000
17368 * and a mapping for the kernel that precisely maps virtual address
17369@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
17370 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17371 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17372 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17373+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
17374+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17375+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
17376+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17377+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17378+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17379 .org init_level4_pgt + L4_START_KERNEL*8, 0
17380 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17381 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17382
17383+#ifdef CONFIG_PAX_PER_CPU_PGD
17384+NEXT_PAGE(cpu_pgd)
17385+ .rept NR_CPUS
17386+ .fill 512,8,0
17387+ .endr
17388+#endif
17389+
17390 NEXT_PAGE(level3_ident_pgt)
17391 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17392+#ifdef CONFIG_XEN
17393 .fill 511,8,0
17394+#else
17395+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17396+ .fill 510,8,0
17397+#endif
17398+
17399+NEXT_PAGE(level3_vmalloc_start_pgt)
17400+ .fill 512,8,0
17401+
17402+NEXT_PAGE(level3_vmalloc_end_pgt)
17403+ .fill 512,8,0
17404+
17405+NEXT_PAGE(level3_vmemmap_pgt)
17406+ .fill L3_VMEMMAP_START,8,0
17407+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17408
17409 NEXT_PAGE(level3_kernel_pgt)
17410 .fill L3_START_KERNEL,8,0
17411@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
17412 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17413 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17414
17415+NEXT_PAGE(level2_vmemmap_pgt)
17416+ .fill 512,8,0
17417+
17418 NEXT_PAGE(level2_fixmap_pgt)
17419- .fill 506,8,0
17420- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17421- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17422- .fill 5,8,0
17423+ .fill 507,8,0
17424+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17425+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17426+ .fill 4,8,0
17427
17428-NEXT_PAGE(level1_fixmap_pgt)
17429+NEXT_PAGE(level1_vsyscall_pgt)
17430 .fill 512,8,0
17431
17432-NEXT_PAGE(level2_ident_pgt)
17433- /* Since I easily can, map the first 1G.
17434+ /* Since I easily can, map the first 2G.
17435 * Don't set NX because code runs from these pages.
17436 */
17437- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17438+NEXT_PAGE(level2_ident_pgt)
17439+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17440
17441 NEXT_PAGE(level2_kernel_pgt)
17442 /*
17443@@ -389,37 +429,59 @@ NEXT_PAGE(level2_kernel_pgt)
17444 * If you want to increase this then increase MODULES_VADDR
17445 * too.)
17446 */
17447- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17448- KERNEL_IMAGE_SIZE/PMD_SIZE)
17449-
17450-NEXT_PAGE(level2_spare_pgt)
17451- .fill 512, 8, 0
17452+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17453
17454 #undef PMDS
17455 #undef NEXT_PAGE
17456
17457- .data
17458+ .align PAGE_SIZE
17459+ENTRY(cpu_gdt_table)
17460+ .rept NR_CPUS
17461+ .quad 0x0000000000000000 /* NULL descriptor */
17462+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17463+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
17464+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
17465+ .quad 0x00cffb000000ffff /* __USER32_CS */
17466+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17467+ .quad 0x00affb000000ffff /* __USER_CS */
17468+
17469+#ifdef CONFIG_PAX_KERNEXEC
17470+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17471+#else
17472+ .quad 0x0 /* unused */
17473+#endif
17474+
17475+ .quad 0,0 /* TSS */
17476+ .quad 0,0 /* LDT */
17477+ .quad 0,0,0 /* three TLS descriptors */
17478+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
17479+ /* asm/segment.h:GDT_ENTRIES must match this */
17480+
17481+ /* zero the remaining page */
17482+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17483+ .endr
17484+
17485 .align 16
17486 .globl early_gdt_descr
17487 early_gdt_descr:
17488 .word GDT_ENTRIES*8-1
17489 early_gdt_descr_base:
17490- .quad INIT_PER_CPU_VAR(gdt_page)
17491+ .quad cpu_gdt_table
17492
17493 ENTRY(phys_base)
17494 /* This must match the first entry in level2_kernel_pgt */
17495 .quad 0x0000000000000000
17496
17497 #include "../../x86/xen/xen-head.S"
17498-
17499- .section .bss, "aw", @nobits
17500+
17501+ .section .rodata,"a",@progbits
17502 .align L1_CACHE_BYTES
17503 ENTRY(idt_table)
17504- .skip IDT_ENTRIES * 16
17505+ .fill 512,8,0
17506
17507 .align L1_CACHE_BYTES
17508 ENTRY(nmi_idt_table)
17509- .skip IDT_ENTRIES * 16
17510+ .fill 512,8,0
17511
17512 __PAGE_ALIGNED_BSS
17513 .align PAGE_SIZE
17514diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17515index 9c3bd4a..e1d9b35 100644
17516--- a/arch/x86/kernel/i386_ksyms_32.c
17517+++ b/arch/x86/kernel/i386_ksyms_32.c
17518@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17519 EXPORT_SYMBOL(cmpxchg8b_emu);
17520 #endif
17521
17522+EXPORT_SYMBOL_GPL(cpu_gdt_table);
17523+
17524 /* Networking helper routines. */
17525 EXPORT_SYMBOL(csum_partial_copy_generic);
17526+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17527+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17528
17529 EXPORT_SYMBOL(__get_user_1);
17530 EXPORT_SYMBOL(__get_user_2);
17531@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17532
17533 EXPORT_SYMBOL(csum_partial);
17534 EXPORT_SYMBOL(empty_zero_page);
17535+
17536+#ifdef CONFIG_PAX_KERNEXEC
17537+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17538+#endif
17539diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
17540index 2d6e649..df6e1af 100644
17541--- a/arch/x86/kernel/i387.c
17542+++ b/arch/x86/kernel/i387.c
17543@@ -59,7 +59,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
17544 static inline bool interrupted_user_mode(void)
17545 {
17546 struct pt_regs *regs = get_irq_regs();
17547- return regs && user_mode_vm(regs);
17548+ return regs && user_mode(regs);
17549 }
17550
17551 /*
17552diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17553index 36d1853..bf25736 100644
17554--- a/arch/x86/kernel/i8259.c
17555+++ b/arch/x86/kernel/i8259.c
17556@@ -209,7 +209,7 @@ spurious_8259A_irq:
17557 "spurious 8259A interrupt: IRQ%d.\n", irq);
17558 spurious_irq_mask |= irqmask;
17559 }
17560- atomic_inc(&irq_err_count);
17561+ atomic_inc_unchecked(&irq_err_count);
17562 /*
17563 * Theoretically we do not have to handle this IRQ,
17564 * but in Linux this does not cause problems and is
17565diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17566index 43e9ccf..44ccf6f 100644
17567--- a/arch/x86/kernel/init_task.c
17568+++ b/arch/x86/kernel/init_task.c
17569@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17570 * way process stacks are handled. This is done by having a special
17571 * "init_task" linker map entry..
17572 */
17573-union thread_union init_thread_union __init_task_data =
17574- { INIT_THREAD_INFO(init_task) };
17575+union thread_union init_thread_union __init_task_data;
17576
17577 /*
17578 * Initial task structure.
17579@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17580 * section. Since TSS's are completely CPU-local, we want them
17581 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17582 */
17583-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17584-
17585+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17586+EXPORT_SYMBOL(init_tss);
17587diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17588index 8c96897..be66bfa 100644
17589--- a/arch/x86/kernel/ioport.c
17590+++ b/arch/x86/kernel/ioport.c
17591@@ -6,6 +6,7 @@
17592 #include <linux/sched.h>
17593 #include <linux/kernel.h>
17594 #include <linux/capability.h>
17595+#include <linux/security.h>
17596 #include <linux/errno.h>
17597 #include <linux/types.h>
17598 #include <linux/ioport.h>
17599@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17600
17601 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17602 return -EINVAL;
17603+#ifdef CONFIG_GRKERNSEC_IO
17604+ if (turn_on && grsec_disable_privio) {
17605+ gr_handle_ioperm();
17606+ return -EPERM;
17607+ }
17608+#endif
17609 if (turn_on && !capable(CAP_SYS_RAWIO))
17610 return -EPERM;
17611
17612@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17613 * because the ->io_bitmap_max value must match the bitmap
17614 * contents:
17615 */
17616- tss = &per_cpu(init_tss, get_cpu());
17617+ tss = init_tss + get_cpu();
17618
17619 if (turn_on)
17620 bitmap_clear(t->io_bitmap_ptr, from, num);
17621@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
17622 return -EINVAL;
17623 /* Trying to gain more privileges? */
17624 if (level > old) {
17625+#ifdef CONFIG_GRKERNSEC_IO
17626+ if (grsec_disable_privio) {
17627+ gr_handle_iopl();
17628+ return -EPERM;
17629+ }
17630+#endif
17631 if (!capable(CAP_SYS_RAWIO))
17632 return -EPERM;
17633 }
17634diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17635index 3dafc60..aa8e9c4 100644
17636--- a/arch/x86/kernel/irq.c
17637+++ b/arch/x86/kernel/irq.c
17638@@ -18,7 +18,7 @@
17639 #include <asm/mce.h>
17640 #include <asm/hw_irq.h>
17641
17642-atomic_t irq_err_count;
17643+atomic_unchecked_t irq_err_count;
17644
17645 /* Function pointer for generic interrupt vector handling */
17646 void (*x86_platform_ipi_callback)(void) = NULL;
17647@@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
17648 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17649 seq_printf(p, " Machine check polls\n");
17650 #endif
17651- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17652+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17653 #if defined(CONFIG_X86_IO_APIC)
17654- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17655+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17656 #endif
17657 return 0;
17658 }
17659@@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17660
17661 u64 arch_irq_stat(void)
17662 {
17663- u64 sum = atomic_read(&irq_err_count);
17664+ u64 sum = atomic_read_unchecked(&irq_err_count);
17665
17666 #ifdef CONFIG_X86_IO_APIC
17667- sum += atomic_read(&irq_mis_count);
17668+ sum += atomic_read_unchecked(&irq_mis_count);
17669 #endif
17670 return sum;
17671 }
17672diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17673index 58b7f27..e112d08 100644
17674--- a/arch/x86/kernel/irq_32.c
17675+++ b/arch/x86/kernel/irq_32.c
17676@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
17677 __asm__ __volatile__("andl %%esp,%0" :
17678 "=r" (sp) : "0" (THREAD_SIZE - 1));
17679
17680- return sp < (sizeof(struct thread_info) + STACK_WARN);
17681+ return sp < STACK_WARN;
17682 }
17683
17684 static void print_stack_overflow(void)
17685@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
17686 * per-CPU IRQ handling contexts (thread information and stack)
17687 */
17688 union irq_ctx {
17689- struct thread_info tinfo;
17690- u32 stack[THREAD_SIZE/sizeof(u32)];
17691+ unsigned long previous_esp;
17692+ u32 stack[THREAD_SIZE/sizeof(u32)];
17693 } __attribute__((aligned(THREAD_SIZE)));
17694
17695 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17696@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
17697 static inline int
17698 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17699 {
17700- union irq_ctx *curctx, *irqctx;
17701+ union irq_ctx *irqctx;
17702 u32 *isp, arg1, arg2;
17703
17704- curctx = (union irq_ctx *) current_thread_info();
17705 irqctx = __this_cpu_read(hardirq_ctx);
17706
17707 /*
17708@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17709 * handler) we can't do that and just have to keep using the
17710 * current stack (which is the irq stack already after all)
17711 */
17712- if (unlikely(curctx == irqctx))
17713+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17714 return 0;
17715
17716 /* build the stack frame on the IRQ stack */
17717- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17718- irqctx->tinfo.task = curctx->tinfo.task;
17719- irqctx->tinfo.previous_esp = current_stack_pointer;
17720+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17721+ irqctx->previous_esp = current_stack_pointer;
17722
17723- /* Copy the preempt_count so that the [soft]irq checks work. */
17724- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
17725+#ifdef CONFIG_PAX_MEMORY_UDEREF
17726+ __set_fs(MAKE_MM_SEG(0));
17727+#endif
17728
17729 if (unlikely(overflow))
17730 call_on_stack(print_stack_overflow, isp);
17731@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17732 : "0" (irq), "1" (desc), "2" (isp),
17733 "D" (desc->handle_irq)
17734 : "memory", "cc", "ecx");
17735+
17736+#ifdef CONFIG_PAX_MEMORY_UDEREF
17737+ __set_fs(current_thread_info()->addr_limit);
17738+#endif
17739+
17740 return 1;
17741 }
17742
17743@@ -121,29 +125,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17744 */
17745 void __cpuinit irq_ctx_init(int cpu)
17746 {
17747- union irq_ctx *irqctx;
17748-
17749 if (per_cpu(hardirq_ctx, cpu))
17750 return;
17751
17752- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17753- THREAD_FLAGS,
17754- THREAD_ORDER));
17755- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17756- irqctx->tinfo.cpu = cpu;
17757- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17758- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17759-
17760- per_cpu(hardirq_ctx, cpu) = irqctx;
17761-
17762- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17763- THREAD_FLAGS,
17764- THREAD_ORDER));
17765- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17766- irqctx->tinfo.cpu = cpu;
17767- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17768-
17769- per_cpu(softirq_ctx, cpu) = irqctx;
17770+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17771+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17772
17773 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17774 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17775@@ -152,7 +138,6 @@ void __cpuinit irq_ctx_init(int cpu)
17776 asmlinkage void do_softirq(void)
17777 {
17778 unsigned long flags;
17779- struct thread_info *curctx;
17780 union irq_ctx *irqctx;
17781 u32 *isp;
17782
17783@@ -162,15 +147,22 @@ asmlinkage void do_softirq(void)
17784 local_irq_save(flags);
17785
17786 if (local_softirq_pending()) {
17787- curctx = current_thread_info();
17788 irqctx = __this_cpu_read(softirq_ctx);
17789- irqctx->tinfo.task = curctx->task;
17790- irqctx->tinfo.previous_esp = current_stack_pointer;
17791+ irqctx->previous_esp = current_stack_pointer;
17792
17793 /* build the stack frame on the softirq stack */
17794- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17795+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17796+
17797+#ifdef CONFIG_PAX_MEMORY_UDEREF
17798+ __set_fs(MAKE_MM_SEG(0));
17799+#endif
17800
17801 call_on_stack(__do_softirq, isp);
17802+
17803+#ifdef CONFIG_PAX_MEMORY_UDEREF
17804+ __set_fs(current_thread_info()->addr_limit);
17805+#endif
17806+
17807 /*
17808 * Shouldn't happen, we returned above if in_interrupt():
17809 */
17810@@ -191,7 +183,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
17811 if (unlikely(!desc))
17812 return false;
17813
17814- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17815+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17816 if (unlikely(overflow))
17817 print_stack_overflow();
17818 desc->handle_irq(irq, desc);
17819diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
17820index d04d3ec..ea4b374 100644
17821--- a/arch/x86/kernel/irq_64.c
17822+++ b/arch/x86/kernel/irq_64.c
17823@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17824 u64 estack_top, estack_bottom;
17825 u64 curbase = (u64)task_stack_page(current);
17826
17827- if (user_mode_vm(regs))
17828+ if (user_mode(regs))
17829 return;
17830
17831 if (regs->sp >= curbase + sizeof(struct thread_info) +
17832diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
17833index 1d5d31e..ab846ed 100644
17834--- a/arch/x86/kernel/kdebugfs.c
17835+++ b/arch/x86/kernel/kdebugfs.c
17836@@ -28,6 +28,8 @@ struct setup_data_node {
17837 };
17838
17839 static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17840+ size_t count, loff_t *ppos) __size_overflow(3);
17841+static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17842 size_t count, loff_t *ppos)
17843 {
17844 struct setup_data_node *node = file->private_data;
17845diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17846index 8bfb614..2b3b35f 100644
17847--- a/arch/x86/kernel/kgdb.c
17848+++ b/arch/x86/kernel/kgdb.c
17849@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
17850 #ifdef CONFIG_X86_32
17851 switch (regno) {
17852 case GDB_SS:
17853- if (!user_mode_vm(regs))
17854+ if (!user_mode(regs))
17855 *(unsigned long *)mem = __KERNEL_DS;
17856 break;
17857 case GDB_SP:
17858- if (!user_mode_vm(regs))
17859+ if (!user_mode(regs))
17860 *(unsigned long *)mem = kernel_stack_pointer(regs);
17861 break;
17862 case GDB_GS:
17863@@ -476,12 +476,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17864 case 'k':
17865 /* clear the trace bit */
17866 linux_regs->flags &= ~X86_EFLAGS_TF;
17867- atomic_set(&kgdb_cpu_doing_single_step, -1);
17868+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17869
17870 /* set the trace bit if we're stepping */
17871 if (remcomInBuffer[0] == 's') {
17872 linux_regs->flags |= X86_EFLAGS_TF;
17873- atomic_set(&kgdb_cpu_doing_single_step,
17874+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17875 raw_smp_processor_id());
17876 }
17877
17878@@ -546,7 +546,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17879
17880 switch (cmd) {
17881 case DIE_DEBUG:
17882- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
17883+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
17884 if (user_mode(regs))
17885 return single_step_cont(regs, args);
17886 break;
17887diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
17888index c5e410e..da6aaf9 100644
17889--- a/arch/x86/kernel/kprobes-opt.c
17890+++ b/arch/x86/kernel/kprobes-opt.c
17891@@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17892 * Verify if the address gap is in 2GB range, because this uses
17893 * a relative jump.
17894 */
17895- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
17896+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
17897 if (abs(rel) > 0x7fffffff)
17898 return -ERANGE;
17899
17900@@ -359,11 +359,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17901 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
17902
17903 /* Set probe function call */
17904- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
17905+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
17906
17907 /* Set returning jmp instruction at the tail of out-of-line buffer */
17908 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
17909- (u8 *)op->kp.addr + op->optinsn.size);
17910+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
17911
17912 flush_icache_range((unsigned long) buf,
17913 (unsigned long) buf + TMPL_END_IDX +
17914@@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
17915 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
17916
17917 /* Backup instructions which will be replaced by jump address */
17918- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
17919+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
17920 RELATIVE_ADDR_SIZE);
17921
17922 insn_buf[0] = RELATIVEJUMP_OPCODE;
17923diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17924index e213fc8..d783ba4 100644
17925--- a/arch/x86/kernel/kprobes.c
17926+++ b/arch/x86/kernel/kprobes.c
17927@@ -120,8 +120,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
17928 } __attribute__((packed)) *insn;
17929
17930 insn = (struct __arch_relative_insn *)from;
17931+
17932+ pax_open_kernel();
17933 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
17934 insn->op = op;
17935+ pax_close_kernel();
17936 }
17937
17938 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
17939@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
17940 kprobe_opcode_t opcode;
17941 kprobe_opcode_t *orig_opcodes = opcodes;
17942
17943- if (search_exception_tables((unsigned long)opcodes))
17944+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17945 return 0; /* Page fault may occur on this address. */
17946
17947 retry:
17948@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17949 /* Another subsystem puts a breakpoint, failed to recover */
17950 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
17951 return 0;
17952+ pax_open_kernel();
17953 memcpy(dest, insn.kaddr, insn.length);
17954+ pax_close_kernel();
17955
17956 #ifdef CONFIG_X86_64
17957 if (insn_rip_relative(&insn)) {
17958@@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17959 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
17960 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
17961 disp = (u8 *) dest + insn_offset_displacement(&insn);
17962+ pax_open_kernel();
17963 *(s32 *) disp = (s32) newdisp;
17964+ pax_close_kernel();
17965 }
17966 #endif
17967 return insn.length;
17968@@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
17969 * nor set current_kprobe, because it doesn't use single
17970 * stepping.
17971 */
17972- regs->ip = (unsigned long)p->ainsn.insn;
17973+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17974 preempt_enable_no_resched();
17975 return;
17976 }
17977@@ -504,7 +511,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
17978 if (p->opcode == BREAKPOINT_INSTRUCTION)
17979 regs->ip = (unsigned long)p->addr;
17980 else
17981- regs->ip = (unsigned long)p->ainsn.insn;
17982+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17983 }
17984
17985 /*
17986@@ -583,7 +590,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17987 setup_singlestep(p, regs, kcb, 0);
17988 return 1;
17989 }
17990- } else if (*addr != BREAKPOINT_INSTRUCTION) {
17991+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17992 /*
17993 * The breakpoint instruction was removed right
17994 * after we hit it. Another cpu has removed
17995@@ -628,6 +635,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17996 " movq %rax, 152(%rsp)\n"
17997 RESTORE_REGS_STRING
17998 " popfq\n"
17999+#ifdef KERNEXEC_PLUGIN
18000+ " btsq $63,(%rsp)\n"
18001+#endif
18002 #else
18003 " pushf\n"
18004 SAVE_REGS_STRING
18005@@ -765,7 +775,7 @@ static void __kprobes
18006 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
18007 {
18008 unsigned long *tos = stack_addr(regs);
18009- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
18010+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
18011 unsigned long orig_ip = (unsigned long)p->addr;
18012 kprobe_opcode_t *insn = p->ainsn.insn;
18013
18014@@ -947,7 +957,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
18015 struct die_args *args = data;
18016 int ret = NOTIFY_DONE;
18017
18018- if (args->regs && user_mode_vm(args->regs))
18019+ if (args->regs && user_mode(args->regs))
18020 return ret;
18021
18022 switch (val) {
18023diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
18024index ebc9873..1b9724b 100644
18025--- a/arch/x86/kernel/ldt.c
18026+++ b/arch/x86/kernel/ldt.c
18027@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18028 if (reload) {
18029 #ifdef CONFIG_SMP
18030 preempt_disable();
18031- load_LDT(pc);
18032+ load_LDT_nolock(pc);
18033 if (!cpumask_equal(mm_cpumask(current->mm),
18034 cpumask_of(smp_processor_id())))
18035 smp_call_function(flush_ldt, current->mm, 1);
18036 preempt_enable();
18037 #else
18038- load_LDT(pc);
18039+ load_LDT_nolock(pc);
18040 #endif
18041 }
18042 if (oldsize) {
18043@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18044 return err;
18045
18046 for (i = 0; i < old->size; i++)
18047- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18048+ write_ldt_entry(new->ldt, i, old->ldt + i);
18049 return 0;
18050 }
18051
18052@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18053 retval = copy_ldt(&mm->context, &old_mm->context);
18054 mutex_unlock(&old_mm->context.lock);
18055 }
18056+
18057+ if (tsk == current) {
18058+ mm->context.vdso = 0;
18059+
18060+#ifdef CONFIG_X86_32
18061+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18062+ mm->context.user_cs_base = 0UL;
18063+ mm->context.user_cs_limit = ~0UL;
18064+
18065+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18066+ cpus_clear(mm->context.cpu_user_cs_mask);
18067+#endif
18068+
18069+#endif
18070+#endif
18071+
18072+ }
18073+
18074 return retval;
18075 }
18076
18077@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18078 }
18079 }
18080
18081+#ifdef CONFIG_PAX_SEGMEXEC
18082+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18083+ error = -EINVAL;
18084+ goto out_unlock;
18085+ }
18086+#endif
18087+
18088 fill_ldt(&ldt, &ldt_info);
18089 if (oldmode)
18090 ldt.avl = 0;
18091diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18092index 5b19e4d..6476a76 100644
18093--- a/arch/x86/kernel/machine_kexec_32.c
18094+++ b/arch/x86/kernel/machine_kexec_32.c
18095@@ -26,7 +26,7 @@
18096 #include <asm/cacheflush.h>
18097 #include <asm/debugreg.h>
18098
18099-static void set_idt(void *newidt, __u16 limit)
18100+static void set_idt(struct desc_struct *newidt, __u16 limit)
18101 {
18102 struct desc_ptr curidt;
18103
18104@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18105 }
18106
18107
18108-static void set_gdt(void *newgdt, __u16 limit)
18109+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18110 {
18111 struct desc_ptr curgdt;
18112
18113@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
18114 }
18115
18116 control_page = page_address(image->control_code_page);
18117- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18118+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18119
18120 relocate_kernel_ptr = control_page;
18121 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18122diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18123index 0327e2b..e43737b 100644
18124--- a/arch/x86/kernel/microcode_intel.c
18125+++ b/arch/x86/kernel/microcode_intel.c
18126@@ -430,13 +430,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18127
18128 static int get_ucode_user(void *to, const void *from, size_t n)
18129 {
18130- return copy_from_user(to, from, n);
18131+ return copy_from_user(to, (const void __force_user *)from, n);
18132 }
18133
18134 static enum ucode_state
18135 request_microcode_user(int cpu, const void __user *buf, size_t size)
18136 {
18137- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18138+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18139 }
18140
18141 static void microcode_fini_cpu(int cpu)
18142diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18143index f21fd94..61565cd 100644
18144--- a/arch/x86/kernel/module.c
18145+++ b/arch/x86/kernel/module.c
18146@@ -35,15 +35,60 @@
18147 #define DEBUGP(fmt...)
18148 #endif
18149
18150-void *module_alloc(unsigned long size)
18151+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
18152 {
18153- if (PAGE_ALIGN(size) > MODULES_LEN)
18154+ if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
18155 return NULL;
18156 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
18157- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
18158+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
18159 -1, __builtin_return_address(0));
18160 }
18161
18162+void *module_alloc(unsigned long size)
18163+{
18164+
18165+#ifdef CONFIG_PAX_KERNEXEC
18166+ return __module_alloc(size, PAGE_KERNEL);
18167+#else
18168+ return __module_alloc(size, PAGE_KERNEL_EXEC);
18169+#endif
18170+
18171+}
18172+
18173+#ifdef CONFIG_PAX_KERNEXEC
18174+#ifdef CONFIG_X86_32
18175+void *module_alloc_exec(unsigned long size)
18176+{
18177+ struct vm_struct *area;
18178+
18179+ if (size == 0)
18180+ return NULL;
18181+
18182+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18183+ return area ? area->addr : NULL;
18184+}
18185+EXPORT_SYMBOL(module_alloc_exec);
18186+
18187+void module_free_exec(struct module *mod, void *module_region)
18188+{
18189+ vunmap(module_region);
18190+}
18191+EXPORT_SYMBOL(module_free_exec);
18192+#else
18193+void module_free_exec(struct module *mod, void *module_region)
18194+{
18195+ module_free(mod, module_region);
18196+}
18197+EXPORT_SYMBOL(module_free_exec);
18198+
18199+void *module_alloc_exec(unsigned long size)
18200+{
18201+ return __module_alloc(size, PAGE_KERNEL_RX);
18202+}
18203+EXPORT_SYMBOL(module_alloc_exec);
18204+#endif
18205+#endif
18206+
18207 #ifdef CONFIG_X86_32
18208 int apply_relocate(Elf32_Shdr *sechdrs,
18209 const char *strtab,
18210@@ -54,14 +99,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18211 unsigned int i;
18212 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18213 Elf32_Sym *sym;
18214- uint32_t *location;
18215+ uint32_t *plocation, location;
18216
18217 DEBUGP("Applying relocate section %u to %u\n", relsec,
18218 sechdrs[relsec].sh_info);
18219 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18220 /* This is where to make the change */
18221- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18222- + rel[i].r_offset;
18223+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18224+ location = (uint32_t)plocation;
18225+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18226+ plocation = ktla_ktva((void *)plocation);
18227 /* This is the symbol it is referring to. Note that all
18228 undefined symbols have been resolved. */
18229 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18230@@ -70,11 +117,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18231 switch (ELF32_R_TYPE(rel[i].r_info)) {
18232 case R_386_32:
18233 /* We add the value into the location given */
18234- *location += sym->st_value;
18235+ pax_open_kernel();
18236+ *plocation += sym->st_value;
18237+ pax_close_kernel();
18238 break;
18239 case R_386_PC32:
18240 /* Add the value, subtract its postition */
18241- *location += sym->st_value - (uint32_t)location;
18242+ pax_open_kernel();
18243+ *plocation += sym->st_value - location;
18244+ pax_close_kernel();
18245 break;
18246 default:
18247 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18248@@ -119,21 +170,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18249 case R_X86_64_NONE:
18250 break;
18251 case R_X86_64_64:
18252+ pax_open_kernel();
18253 *(u64 *)loc = val;
18254+ pax_close_kernel();
18255 break;
18256 case R_X86_64_32:
18257+ pax_open_kernel();
18258 *(u32 *)loc = val;
18259+ pax_close_kernel();
18260 if (val != *(u32 *)loc)
18261 goto overflow;
18262 break;
18263 case R_X86_64_32S:
18264+ pax_open_kernel();
18265 *(s32 *)loc = val;
18266+ pax_close_kernel();
18267 if ((s64)val != *(s32 *)loc)
18268 goto overflow;
18269 break;
18270 case R_X86_64_PC32:
18271 val -= (u64)loc;
18272+ pax_open_kernel();
18273 *(u32 *)loc = val;
18274+ pax_close_kernel();
18275+
18276 #if 0
18277 if ((s64)val != *(s32 *)loc)
18278 goto overflow;
18279diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
18280index 32856fa..ce95eaa 100644
18281--- a/arch/x86/kernel/nmi.c
18282+++ b/arch/x86/kernel/nmi.c
18283@@ -507,6 +507,17 @@ static inline void nmi_nesting_postprocess(void)
18284 dotraplinkage notrace __kprobes void
18285 do_nmi(struct pt_regs *regs, long error_code)
18286 {
18287+
18288+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18289+ if (!user_mode(regs)) {
18290+ unsigned long cs = regs->cs & 0xFFFF;
18291+ unsigned long ip = ktva_ktla(regs->ip);
18292+
18293+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
18294+ regs->ip = ip;
18295+ }
18296+#endif
18297+
18298 nmi_nesting_preprocess(regs);
18299
18300 nmi_enter();
18301diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18302index 676b8c7..870ba04 100644
18303--- a/arch/x86/kernel/paravirt-spinlocks.c
18304+++ b/arch/x86/kernel/paravirt-spinlocks.c
18305@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
18306 arch_spin_lock(lock);
18307 }
18308
18309-struct pv_lock_ops pv_lock_ops = {
18310+struct pv_lock_ops pv_lock_ops __read_only = {
18311 #ifdef CONFIG_SMP
18312 .spin_is_locked = __ticket_spin_is_locked,
18313 .spin_is_contended = __ticket_spin_is_contended,
18314diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18315index ab13760..01218e0 100644
18316--- a/arch/x86/kernel/paravirt.c
18317+++ b/arch/x86/kernel/paravirt.c
18318@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
18319 {
18320 return x;
18321 }
18322+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18323+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18324+#endif
18325
18326 void __init default_banner(void)
18327 {
18328@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18329 if (opfunc == NULL)
18330 /* If there's no function, patch it with a ud2a (BUG) */
18331 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18332- else if (opfunc == _paravirt_nop)
18333+ else if (opfunc == (void *)_paravirt_nop)
18334 /* If the operation is a nop, then nop the callsite */
18335 ret = paravirt_patch_nop();
18336
18337 /* identity functions just return their single argument */
18338- else if (opfunc == _paravirt_ident_32)
18339+ else if (opfunc == (void *)_paravirt_ident_32)
18340 ret = paravirt_patch_ident_32(insnbuf, len);
18341- else if (opfunc == _paravirt_ident_64)
18342+ else if (opfunc == (void *)_paravirt_ident_64)
18343 ret = paravirt_patch_ident_64(insnbuf, len);
18344+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18345+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18346+ ret = paravirt_patch_ident_64(insnbuf, len);
18347+#endif
18348
18349 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18350 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18351@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18352 if (insn_len > len || start == NULL)
18353 insn_len = len;
18354 else
18355- memcpy(insnbuf, start, insn_len);
18356+ memcpy(insnbuf, ktla_ktva(start), insn_len);
18357
18358 return insn_len;
18359 }
18360@@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
18361 preempt_enable();
18362 }
18363
18364-struct pv_info pv_info = {
18365+struct pv_info pv_info __read_only = {
18366 .name = "bare hardware",
18367 .paravirt_enabled = 0,
18368 .kernel_rpl = 0,
18369@@ -315,16 +322,16 @@ struct pv_info pv_info = {
18370 #endif
18371 };
18372
18373-struct pv_init_ops pv_init_ops = {
18374+struct pv_init_ops pv_init_ops __read_only = {
18375 .patch = native_patch,
18376 };
18377
18378-struct pv_time_ops pv_time_ops = {
18379+struct pv_time_ops pv_time_ops __read_only = {
18380 .sched_clock = native_sched_clock,
18381 .steal_clock = native_steal_clock,
18382 };
18383
18384-struct pv_irq_ops pv_irq_ops = {
18385+struct pv_irq_ops pv_irq_ops __read_only = {
18386 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18387 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18388 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18389@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
18390 #endif
18391 };
18392
18393-struct pv_cpu_ops pv_cpu_ops = {
18394+struct pv_cpu_ops pv_cpu_ops __read_only = {
18395 .cpuid = native_cpuid,
18396 .get_debugreg = native_get_debugreg,
18397 .set_debugreg = native_set_debugreg,
18398@@ -397,21 +404,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18399 .end_context_switch = paravirt_nop,
18400 };
18401
18402-struct pv_apic_ops pv_apic_ops = {
18403+struct pv_apic_ops pv_apic_ops __read_only = {
18404 #ifdef CONFIG_X86_LOCAL_APIC
18405 .startup_ipi_hook = paravirt_nop,
18406 #endif
18407 };
18408
18409-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18410+#ifdef CONFIG_X86_32
18411+#ifdef CONFIG_X86_PAE
18412+/* 64-bit pagetable entries */
18413+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18414+#else
18415 /* 32-bit pagetable entries */
18416 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18417+#endif
18418 #else
18419 /* 64-bit pagetable entries */
18420 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18421 #endif
18422
18423-struct pv_mmu_ops pv_mmu_ops = {
18424+struct pv_mmu_ops pv_mmu_ops __read_only = {
18425
18426 .read_cr2 = native_read_cr2,
18427 .write_cr2 = native_write_cr2,
18428@@ -461,6 +473,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18429 .make_pud = PTE_IDENT,
18430
18431 .set_pgd = native_set_pgd,
18432+ .set_pgd_batched = native_set_pgd_batched,
18433 #endif
18434 #endif /* PAGETABLE_LEVELS >= 3 */
18435
18436@@ -480,6 +493,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18437 },
18438
18439 .set_fixmap = native_set_fixmap,
18440+
18441+#ifdef CONFIG_PAX_KERNEXEC
18442+ .pax_open_kernel = native_pax_open_kernel,
18443+ .pax_close_kernel = native_pax_close_kernel,
18444+#endif
18445+
18446 };
18447
18448 EXPORT_SYMBOL_GPL(pv_time_ops);
18449diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
18450index 35ccf75..7a15747 100644
18451--- a/arch/x86/kernel/pci-iommu_table.c
18452+++ b/arch/x86/kernel/pci-iommu_table.c
18453@@ -2,7 +2,7 @@
18454 #include <asm/iommu_table.h>
18455 #include <linux/string.h>
18456 #include <linux/kallsyms.h>
18457-
18458+#include <linux/sched.h>
18459
18460 #define DEBUG 1
18461
18462diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18463index 1d92a5a..7bc8c29 100644
18464--- a/arch/x86/kernel/process.c
18465+++ b/arch/x86/kernel/process.c
18466@@ -69,16 +69,33 @@ void free_thread_xstate(struct task_struct *tsk)
18467
18468 void free_thread_info(struct thread_info *ti)
18469 {
18470- free_thread_xstate(ti->task);
18471 free_pages((unsigned long)ti, THREAD_ORDER);
18472 }
18473
18474+static struct kmem_cache *task_struct_cachep;
18475+
18476 void arch_task_cache_init(void)
18477 {
18478- task_xstate_cachep =
18479- kmem_cache_create("task_xstate", xstate_size,
18480+ /* create a slab on which task_structs can be allocated */
18481+ task_struct_cachep =
18482+ kmem_cache_create("task_struct", sizeof(struct task_struct),
18483+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18484+
18485+ task_xstate_cachep =
18486+ kmem_cache_create("task_xstate", xstate_size,
18487 __alignof__(union thread_xstate),
18488- SLAB_PANIC | SLAB_NOTRACK, NULL);
18489+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18490+}
18491+
18492+struct task_struct *alloc_task_struct_node(int node)
18493+{
18494+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
18495+}
18496+
18497+void free_task_struct(struct task_struct *task)
18498+{
18499+ free_thread_xstate(task);
18500+ kmem_cache_free(task_struct_cachep, task);
18501 }
18502
18503 /*
18504@@ -91,7 +108,7 @@ void exit_thread(void)
18505 unsigned long *bp = t->io_bitmap_ptr;
18506
18507 if (bp) {
18508- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18509+ struct tss_struct *tss = init_tss + get_cpu();
18510
18511 t->io_bitmap_ptr = NULL;
18512 clear_thread_flag(TIF_IO_BITMAP);
18513@@ -127,7 +144,7 @@ void show_regs_common(void)
18514
18515 printk(KERN_CONT "\n");
18516 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
18517- current->pid, current->comm, print_tainted(),
18518+ task_pid_nr(current), current->comm, print_tainted(),
18519 init_utsname()->release,
18520 (int)strcspn(init_utsname()->version, " "),
18521 init_utsname()->version);
18522@@ -141,6 +158,9 @@ void flush_thread(void)
18523 {
18524 struct task_struct *tsk = current;
18525
18526+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18527+ loadsegment(gs, 0);
18528+#endif
18529 flush_ptrace_hw_breakpoint(tsk);
18530 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
18531 /*
18532@@ -303,10 +323,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18533 regs.di = (unsigned long) arg;
18534
18535 #ifdef CONFIG_X86_32
18536- regs.ds = __USER_DS;
18537- regs.es = __USER_DS;
18538+ regs.ds = __KERNEL_DS;
18539+ regs.es = __KERNEL_DS;
18540 regs.fs = __KERNEL_PERCPU;
18541- regs.gs = __KERNEL_STACK_CANARY;
18542+ savesegment(gs, regs.gs);
18543 #else
18544 regs.ss = __KERNEL_DS;
18545 #endif
18546@@ -392,7 +412,7 @@ static void __exit_idle(void)
18547 void exit_idle(void)
18548 {
18549 /* idle loop has pid 0 */
18550- if (current->pid)
18551+ if (task_pid_nr(current))
18552 return;
18553 __exit_idle();
18554 }
18555@@ -501,7 +521,7 @@ bool set_pm_idle_to_default(void)
18556
18557 return ret;
18558 }
18559-void stop_this_cpu(void *dummy)
18560+__noreturn void stop_this_cpu(void *dummy)
18561 {
18562 local_irq_disable();
18563 /*
18564@@ -743,16 +763,37 @@ static int __init idle_setup(char *str)
18565 }
18566 early_param("idle", idle_setup);
18567
18568-unsigned long arch_align_stack(unsigned long sp)
18569+#ifdef CONFIG_PAX_RANDKSTACK
18570+void pax_randomize_kstack(struct pt_regs *regs)
18571 {
18572- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18573- sp -= get_random_int() % 8192;
18574- return sp & ~0xf;
18575-}
18576+ struct thread_struct *thread = &current->thread;
18577+ unsigned long time;
18578
18579-unsigned long arch_randomize_brk(struct mm_struct *mm)
18580-{
18581- unsigned long range_end = mm->brk + 0x02000000;
18582- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18583-}
18584+ if (!randomize_va_space)
18585+ return;
18586+
18587+ if (v8086_mode(regs))
18588+ return;
18589
18590+ rdtscl(time);
18591+
18592+ /* P4 seems to return a 0 LSB, ignore it */
18593+#ifdef CONFIG_MPENTIUM4
18594+ time &= 0x3EUL;
18595+ time <<= 2;
18596+#elif defined(CONFIG_X86_64)
18597+ time &= 0xFUL;
18598+ time <<= 4;
18599+#else
18600+ time &= 0x1FUL;
18601+ time <<= 3;
18602+#endif
18603+
18604+ thread->sp0 ^= time;
18605+ load_sp0(init_tss + smp_processor_id(), thread);
18606+
18607+#ifdef CONFIG_X86_64
18608+ percpu_write(kernel_stack, thread->sp0);
18609+#endif
18610+}
18611+#endif
18612diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18613index ae68473..7b0bb71 100644
18614--- a/arch/x86/kernel/process_32.c
18615+++ b/arch/x86/kernel/process_32.c
18616@@ -64,6 +64,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18617 unsigned long thread_saved_pc(struct task_struct *tsk)
18618 {
18619 return ((unsigned long *)tsk->thread.sp)[3];
18620+//XXX return tsk->thread.eip;
18621 }
18622
18623 void __show_regs(struct pt_regs *regs, int all)
18624@@ -73,15 +74,14 @@ void __show_regs(struct pt_regs *regs, int all)
18625 unsigned long sp;
18626 unsigned short ss, gs;
18627
18628- if (user_mode_vm(regs)) {
18629+ if (user_mode(regs)) {
18630 sp = regs->sp;
18631 ss = regs->ss & 0xffff;
18632- gs = get_user_gs(regs);
18633 } else {
18634 sp = kernel_stack_pointer(regs);
18635 savesegment(ss, ss);
18636- savesegment(gs, gs);
18637 }
18638+ gs = get_user_gs(regs);
18639
18640 show_regs_common();
18641
18642@@ -143,13 +143,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18643 struct task_struct *tsk;
18644 int err;
18645
18646- childregs = task_pt_regs(p);
18647+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18648 *childregs = *regs;
18649 childregs->ax = 0;
18650 childregs->sp = sp;
18651
18652 p->thread.sp = (unsigned long) childregs;
18653 p->thread.sp0 = (unsigned long) (childregs+1);
18654+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18655
18656 p->thread.ip = (unsigned long) ret_from_fork;
18657
18658@@ -240,7 +241,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18659 struct thread_struct *prev = &prev_p->thread,
18660 *next = &next_p->thread;
18661 int cpu = smp_processor_id();
18662- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18663+ struct tss_struct *tss = init_tss + cpu;
18664 fpu_switch_t fpu;
18665
18666 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18667@@ -264,6 +265,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18668 */
18669 lazy_save_gs(prev->gs);
18670
18671+#ifdef CONFIG_PAX_MEMORY_UDEREF
18672+ __set_fs(task_thread_info(next_p)->addr_limit);
18673+#endif
18674+
18675 /*
18676 * Load the per-thread Thread-Local Storage descriptor.
18677 */
18678@@ -294,6 +299,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18679 */
18680 arch_end_context_switch(next_p);
18681
18682+ percpu_write(current_task, next_p);
18683+ percpu_write(current_tinfo, &next_p->tinfo);
18684+
18685 /*
18686 * Restore %gs if needed (which is common)
18687 */
18688@@ -302,8 +310,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18689
18690 switch_fpu_finish(next_p, fpu);
18691
18692- percpu_write(current_task, next_p);
18693-
18694 return prev_p;
18695 }
18696
18697@@ -333,4 +339,3 @@ unsigned long get_wchan(struct task_struct *p)
18698 } while (count++ < 16);
18699 return 0;
18700 }
18701-
18702diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18703index 43d8b48..c45d566 100644
18704--- a/arch/x86/kernel/process_64.c
18705+++ b/arch/x86/kernel/process_64.c
18706@@ -162,8 +162,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18707 struct pt_regs *childregs;
18708 struct task_struct *me = current;
18709
18710- childregs = ((struct pt_regs *)
18711- (THREAD_SIZE + task_stack_page(p))) - 1;
18712+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18713 *childregs = *regs;
18714
18715 childregs->ax = 0;
18716@@ -175,6 +174,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18717 p->thread.sp = (unsigned long) childregs;
18718 p->thread.sp0 = (unsigned long) (childregs+1);
18719 p->thread.usersp = me->thread.usersp;
18720+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18721
18722 set_tsk_thread_flag(p, TIF_FORK);
18723
18724@@ -280,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18725 struct thread_struct *prev = &prev_p->thread;
18726 struct thread_struct *next = &next_p->thread;
18727 int cpu = smp_processor_id();
18728- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18729+ struct tss_struct *tss = init_tss + cpu;
18730 unsigned fsindex, gsindex;
18731 fpu_switch_t fpu;
18732
18733@@ -362,10 +362,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18734 prev->usersp = percpu_read(old_rsp);
18735 percpu_write(old_rsp, next->usersp);
18736 percpu_write(current_task, next_p);
18737+ percpu_write(current_tinfo, &next_p->tinfo);
18738
18739- percpu_write(kernel_stack,
18740- (unsigned long)task_stack_page(next_p) +
18741- THREAD_SIZE - KERNEL_STACK_OFFSET);
18742+ percpu_write(kernel_stack, next->sp0);
18743
18744 /*
18745 * Now maybe reload the debug registers and handle I/O bitmaps
18746@@ -434,12 +433,11 @@ unsigned long get_wchan(struct task_struct *p)
18747 if (!p || p == current || p->state == TASK_RUNNING)
18748 return 0;
18749 stack = (unsigned long)task_stack_page(p);
18750- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18751+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18752 return 0;
18753 fp = *(u64 *)(p->thread.sp);
18754 do {
18755- if (fp < (unsigned long)stack ||
18756- fp >= (unsigned long)stack+THREAD_SIZE)
18757+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18758 return 0;
18759 ip = *(u64 *)(fp+8);
18760 if (!in_sched_functions(ip))
18761diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18762index cf11783..e7ce551 100644
18763--- a/arch/x86/kernel/ptrace.c
18764+++ b/arch/x86/kernel/ptrace.c
18765@@ -824,7 +824,7 @@ long arch_ptrace(struct task_struct *child, long request,
18766 unsigned long addr, unsigned long data)
18767 {
18768 int ret;
18769- unsigned long __user *datap = (unsigned long __user *)data;
18770+ unsigned long __user *datap = (__force unsigned long __user *)data;
18771
18772 switch (request) {
18773 /* read the word at location addr in the USER area. */
18774@@ -909,14 +909,14 @@ long arch_ptrace(struct task_struct *child, long request,
18775 if ((int) addr < 0)
18776 return -EIO;
18777 ret = do_get_thread_area(child, addr,
18778- (struct user_desc __user *)data);
18779+ (__force struct user_desc __user *) data);
18780 break;
18781
18782 case PTRACE_SET_THREAD_AREA:
18783 if ((int) addr < 0)
18784 return -EIO;
18785 ret = do_set_thread_area(child, addr,
18786- (struct user_desc __user *)data, 0);
18787+ (__force struct user_desc __user *) data, 0);
18788 break;
18789 #endif
18790
18791@@ -1426,7 +1426,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
18792 memset(info, 0, sizeof(*info));
18793 info->si_signo = SIGTRAP;
18794 info->si_code = si_code;
18795- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18796+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18797 }
18798
18799 void user_single_step_siginfo(struct task_struct *tsk,
18800@@ -1455,6 +1455,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18801 # define IS_IA32 0
18802 #endif
18803
18804+#ifdef CONFIG_GRKERNSEC_SETXID
18805+extern void gr_delayed_cred_worker(void);
18806+#endif
18807+
18808 /*
18809 * We must return the syscall number to actually look up in the table.
18810 * This can be -1L to skip running any syscall at all.
18811@@ -1463,6 +1467,11 @@ long syscall_trace_enter(struct pt_regs *regs)
18812 {
18813 long ret = 0;
18814
18815+#ifdef CONFIG_GRKERNSEC_SETXID
18816+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18817+ gr_delayed_cred_worker();
18818+#endif
18819+
18820 /*
18821 * If we stepped into a sysenter/syscall insn, it trapped in
18822 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
18823@@ -1506,6 +1515,11 @@ void syscall_trace_leave(struct pt_regs *regs)
18824 {
18825 bool step;
18826
18827+#ifdef CONFIG_GRKERNSEC_SETXID
18828+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18829+ gr_delayed_cred_worker();
18830+#endif
18831+
18832 audit_syscall_exit(regs);
18833
18834 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
18835diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
18836index 42eb330..139955c 100644
18837--- a/arch/x86/kernel/pvclock.c
18838+++ b/arch/x86/kernel/pvclock.c
18839@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
18840 return pv_tsc_khz;
18841 }
18842
18843-static atomic64_t last_value = ATOMIC64_INIT(0);
18844+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
18845
18846 void pvclock_resume(void)
18847 {
18848- atomic64_set(&last_value, 0);
18849+ atomic64_set_unchecked(&last_value, 0);
18850 }
18851
18852 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18853@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18854 * updating at the same time, and one of them could be slightly behind,
18855 * making the assumption that last_value always go forward fail to hold.
18856 */
18857- last = atomic64_read(&last_value);
18858+ last = atomic64_read_unchecked(&last_value);
18859 do {
18860 if (ret < last)
18861 return last;
18862- last = atomic64_cmpxchg(&last_value, last, ret);
18863+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
18864 } while (unlikely(last != ret));
18865
18866 return ret;
18867diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18868index d840e69..98e9581 100644
18869--- a/arch/x86/kernel/reboot.c
18870+++ b/arch/x86/kernel/reboot.c
18871@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
18872 EXPORT_SYMBOL(pm_power_off);
18873
18874 static const struct desc_ptr no_idt = {};
18875-static int reboot_mode;
18876+static unsigned short reboot_mode;
18877 enum reboot_type reboot_type = BOOT_ACPI;
18878 int reboot_force;
18879
18880@@ -335,13 +335,17 @@ core_initcall(reboot_init);
18881 extern const unsigned char machine_real_restart_asm[];
18882 extern const u64 machine_real_restart_gdt[3];
18883
18884-void machine_real_restart(unsigned int type)
18885+__noreturn void machine_real_restart(unsigned int type)
18886 {
18887 void *restart_va;
18888 unsigned long restart_pa;
18889- void (*restart_lowmem)(unsigned int);
18890+ void (* __noreturn restart_lowmem)(unsigned int);
18891 u64 *lowmem_gdt;
18892
18893+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18894+ struct desc_struct *gdt;
18895+#endif
18896+
18897 local_irq_disable();
18898
18899 /* Write zero to CMOS register number 0x0f, which the BIOS POST
18900@@ -367,14 +371,14 @@ void machine_real_restart(unsigned int type)
18901 boot)". This seems like a fairly standard thing that gets set by
18902 REBOOT.COM programs, and the previous reset routine did this
18903 too. */
18904- *((unsigned short *)0x472) = reboot_mode;
18905+ *(unsigned short *)(__va(0x472)) = reboot_mode;
18906
18907 /* Patch the GDT in the low memory trampoline */
18908 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
18909
18910 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
18911 restart_pa = virt_to_phys(restart_va);
18912- restart_lowmem = (void (*)(unsigned int))restart_pa;
18913+ restart_lowmem = (void *)restart_pa;
18914
18915 /* GDT[0]: GDT self-pointer */
18916 lowmem_gdt[0] =
18917@@ -385,7 +389,33 @@ void machine_real_restart(unsigned int type)
18918 GDT_ENTRY(0x009b, restart_pa, 0xffff);
18919
18920 /* Jump to the identity-mapped low memory code */
18921+
18922+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18923+ gdt = get_cpu_gdt_table(smp_processor_id());
18924+ pax_open_kernel();
18925+#ifdef CONFIG_PAX_MEMORY_UDEREF
18926+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
18927+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
18928+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
18929+#endif
18930+#ifdef CONFIG_PAX_KERNEXEC
18931+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
18932+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
18933+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
18934+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
18935+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
18936+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
18937+#endif
18938+ pax_close_kernel();
18939+#endif
18940+
18941+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18942+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
18943+ unreachable();
18944+#else
18945 restart_lowmem(type);
18946+#endif
18947+
18948 }
18949 #ifdef CONFIG_APM_MODULE
18950 EXPORT_SYMBOL(machine_real_restart);
18951@@ -556,7 +586,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
18952 * try to force a triple fault and then cycle between hitting the keyboard
18953 * controller and doing that
18954 */
18955-static void native_machine_emergency_restart(void)
18956+__noreturn static void native_machine_emergency_restart(void)
18957 {
18958 int i;
18959 int attempt = 0;
18960@@ -680,13 +710,13 @@ void native_machine_shutdown(void)
18961 #endif
18962 }
18963
18964-static void __machine_emergency_restart(int emergency)
18965+static __noreturn void __machine_emergency_restart(int emergency)
18966 {
18967 reboot_emergency = emergency;
18968 machine_ops.emergency_restart();
18969 }
18970
18971-static void native_machine_restart(char *__unused)
18972+static __noreturn void native_machine_restart(char *__unused)
18973 {
18974 printk("machine restart\n");
18975
18976@@ -695,7 +725,7 @@ static void native_machine_restart(char *__unused)
18977 __machine_emergency_restart(0);
18978 }
18979
18980-static void native_machine_halt(void)
18981+static __noreturn void native_machine_halt(void)
18982 {
18983 /* stop other cpus and apics */
18984 machine_shutdown();
18985@@ -706,7 +736,7 @@ static void native_machine_halt(void)
18986 stop_this_cpu(NULL);
18987 }
18988
18989-static void native_machine_power_off(void)
18990+__noreturn static void native_machine_power_off(void)
18991 {
18992 if (pm_power_off) {
18993 if (!reboot_force)
18994@@ -715,6 +745,7 @@ static void native_machine_power_off(void)
18995 }
18996 /* a fallback in case there is no PM info available */
18997 tboot_shutdown(TB_SHUTDOWN_HALT);
18998+ unreachable();
18999 }
19000
19001 struct machine_ops machine_ops = {
19002diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19003index 7a6f3b3..bed145d7 100644
19004--- a/arch/x86/kernel/relocate_kernel_64.S
19005+++ b/arch/x86/kernel/relocate_kernel_64.S
19006@@ -11,6 +11,7 @@
19007 #include <asm/kexec.h>
19008 #include <asm/processor-flags.h>
19009 #include <asm/pgtable_types.h>
19010+#include <asm/alternative-asm.h>
19011
19012 /*
19013 * Must be relocatable PIC code callable as a C function
19014@@ -160,13 +161,14 @@ identity_mapped:
19015 xorq %rbp, %rbp
19016 xorq %r8, %r8
19017 xorq %r9, %r9
19018- xorq %r10, %r9
19019+ xorq %r10, %r10
19020 xorq %r11, %r11
19021 xorq %r12, %r12
19022 xorq %r13, %r13
19023 xorq %r14, %r14
19024 xorq %r15, %r15
19025
19026+ pax_force_retaddr 0, 1
19027 ret
19028
19029 1:
19030diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
19031index 1a29015..712f324 100644
19032--- a/arch/x86/kernel/setup.c
19033+++ b/arch/x86/kernel/setup.c
19034@@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
19035
19036 switch (data->type) {
19037 case SETUP_E820_EXT:
19038- parse_e820_ext(data);
19039+ parse_e820_ext((struct setup_data __force_kernel *)data);
19040 break;
19041 case SETUP_DTB:
19042 add_dtb(pa_data);
19043@@ -639,7 +639,7 @@ static void __init trim_bios_range(void)
19044 * area (640->1Mb) as ram even though it is not.
19045 * take them out.
19046 */
19047- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
19048+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
19049 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
19050 }
19051
19052@@ -763,14 +763,14 @@ void __init setup_arch(char **cmdline_p)
19053
19054 if (!boot_params.hdr.root_flags)
19055 root_mountflags &= ~MS_RDONLY;
19056- init_mm.start_code = (unsigned long) _text;
19057- init_mm.end_code = (unsigned long) _etext;
19058+ init_mm.start_code = ktla_ktva((unsigned long) _text);
19059+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
19060 init_mm.end_data = (unsigned long) _edata;
19061 init_mm.brk = _brk_end;
19062
19063- code_resource.start = virt_to_phys(_text);
19064- code_resource.end = virt_to_phys(_etext)-1;
19065- data_resource.start = virt_to_phys(_etext);
19066+ code_resource.start = virt_to_phys(ktla_ktva(_text));
19067+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19068+ data_resource.start = virt_to_phys(_sdata);
19069 data_resource.end = virt_to_phys(_edata)-1;
19070 bss_resource.start = virt_to_phys(&__bss_start);
19071 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19072diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19073index 5a98aa2..2f9288d 100644
19074--- a/arch/x86/kernel/setup_percpu.c
19075+++ b/arch/x86/kernel/setup_percpu.c
19076@@ -21,19 +21,17 @@
19077 #include <asm/cpu.h>
19078 #include <asm/stackprotector.h>
19079
19080-DEFINE_PER_CPU(int, cpu_number);
19081+#ifdef CONFIG_SMP
19082+DEFINE_PER_CPU(unsigned int, cpu_number);
19083 EXPORT_PER_CPU_SYMBOL(cpu_number);
19084+#endif
19085
19086-#ifdef CONFIG_X86_64
19087 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19088-#else
19089-#define BOOT_PERCPU_OFFSET 0
19090-#endif
19091
19092 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19093 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19094
19095-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19096+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19097 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19098 };
19099 EXPORT_SYMBOL(__per_cpu_offset);
19100@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
19101 {
19102 #ifdef CONFIG_X86_32
19103 struct desc_struct gdt;
19104+ unsigned long base = per_cpu_offset(cpu);
19105
19106- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19107- 0x2 | DESCTYPE_S, 0x8);
19108- gdt.s = 1;
19109+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19110+ 0x83 | DESCTYPE_S, 0xC);
19111 write_gdt_entry(get_cpu_gdt_table(cpu),
19112 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19113 #endif
19114@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
19115 /* alrighty, percpu areas up and running */
19116 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19117 for_each_possible_cpu(cpu) {
19118+#ifdef CONFIG_CC_STACKPROTECTOR
19119+#ifdef CONFIG_X86_32
19120+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
19121+#endif
19122+#endif
19123 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19124 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19125 per_cpu(cpu_number, cpu) = cpu;
19126@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
19127 */
19128 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
19129 #endif
19130+#ifdef CONFIG_CC_STACKPROTECTOR
19131+#ifdef CONFIG_X86_32
19132+ if (!cpu)
19133+ per_cpu(stack_canary.canary, cpu) = canary;
19134+#endif
19135+#endif
19136 /*
19137 * Up to this point, the boot CPU has been using .init.data
19138 * area. Reload any changed state for the boot CPU.
19139diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19140index 115eac4..c0591d5 100644
19141--- a/arch/x86/kernel/signal.c
19142+++ b/arch/x86/kernel/signal.c
19143@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
19144 * Align the stack pointer according to the i386 ABI,
19145 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19146 */
19147- sp = ((sp + 4) & -16ul) - 4;
19148+ sp = ((sp - 12) & -16ul) - 4;
19149 #else /* !CONFIG_X86_32 */
19150 sp = round_down(sp, 16) - 8;
19151 #endif
19152@@ -241,11 +241,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19153 * Return an always-bogus address instead so we will die with SIGSEGV.
19154 */
19155 if (onsigstack && !likely(on_sig_stack(sp)))
19156- return (void __user *)-1L;
19157+ return (__force void __user *)-1L;
19158
19159 /* save i387 state */
19160 if (used_math() && save_i387_xstate(*fpstate) < 0)
19161- return (void __user *)-1L;
19162+ return (__force void __user *)-1L;
19163
19164 return (void __user *)sp;
19165 }
19166@@ -300,9 +300,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19167 }
19168
19169 if (current->mm->context.vdso)
19170- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19171+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19172 else
19173- restorer = &frame->retcode;
19174+ restorer = (void __user *)&frame->retcode;
19175 if (ka->sa.sa_flags & SA_RESTORER)
19176 restorer = ka->sa.sa_restorer;
19177
19178@@ -316,7 +316,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19179 * reasons and because gdb uses it as a signature to notice
19180 * signal handler stack frames.
19181 */
19182- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19183+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19184
19185 if (err)
19186 return -EFAULT;
19187@@ -370,7 +370,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19188 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19189
19190 /* Set up to return from userspace. */
19191- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19192+ if (current->mm->context.vdso)
19193+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19194+ else
19195+ restorer = (void __user *)&frame->retcode;
19196 if (ka->sa.sa_flags & SA_RESTORER)
19197 restorer = ka->sa.sa_restorer;
19198 put_user_ex(restorer, &frame->pretcode);
19199@@ -382,7 +385,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19200 * reasons and because gdb uses it as a signature to notice
19201 * signal handler stack frames.
19202 */
19203- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19204+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19205 } put_user_catch(err);
19206
19207 if (err)
19208@@ -773,7 +776,7 @@ static void do_signal(struct pt_regs *regs)
19209 * X86_32: vm86 regs switched out by assembly code before reaching
19210 * here, so testing against kernel CS suffices.
19211 */
19212- if (!user_mode(regs))
19213+ if (!user_mode_novm(regs))
19214 return;
19215
19216 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
19217diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19218index 6e1e406..edfb7cb 100644
19219--- a/arch/x86/kernel/smpboot.c
19220+++ b/arch/x86/kernel/smpboot.c
19221@@ -699,17 +699,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19222 set_idle_for_cpu(cpu, c_idle.idle);
19223 do_rest:
19224 per_cpu(current_task, cpu) = c_idle.idle;
19225+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19226 #ifdef CONFIG_X86_32
19227 /* Stack for startup_32 can be just as for start_secondary onwards */
19228 irq_ctx_init(cpu);
19229 #else
19230 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19231 initial_gs = per_cpu_offset(cpu);
19232- per_cpu(kernel_stack, cpu) =
19233- (unsigned long)task_stack_page(c_idle.idle) -
19234- KERNEL_STACK_OFFSET + THREAD_SIZE;
19235+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19236 #endif
19237+
19238+ pax_open_kernel();
19239 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19240+ pax_close_kernel();
19241+
19242 initial_code = (unsigned long)start_secondary;
19243 stack_start = c_idle.idle->thread.sp;
19244
19245@@ -851,6 +854,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19246
19247 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19248
19249+#ifdef CONFIG_PAX_PER_CPU_PGD
19250+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19251+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19252+ KERNEL_PGD_PTRS);
19253+#endif
19254+
19255 err = do_boot_cpu(apicid, cpu);
19256 if (err) {
19257 pr_debug("do_boot_cpu failed %d\n", err);
19258diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19259index c346d11..d43b163 100644
19260--- a/arch/x86/kernel/step.c
19261+++ b/arch/x86/kernel/step.c
19262@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19263 struct desc_struct *desc;
19264 unsigned long base;
19265
19266- seg &= ~7UL;
19267+ seg >>= 3;
19268
19269 mutex_lock(&child->mm->context.lock);
19270- if (unlikely((seg >> 3) >= child->mm->context.size))
19271+ if (unlikely(seg >= child->mm->context.size))
19272 addr = -1L; /* bogus selector, access would fault */
19273 else {
19274 desc = child->mm->context.ldt + seg;
19275@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19276 addr += base;
19277 }
19278 mutex_unlock(&child->mm->context.lock);
19279- }
19280+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19281+ addr = ktla_ktva(addr);
19282
19283 return addr;
19284 }
19285@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19286 unsigned char opcode[15];
19287 unsigned long addr = convert_ip_to_linear(child, regs);
19288
19289+ if (addr == -EINVAL)
19290+ return 0;
19291+
19292 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19293 for (i = 0; i < copied; i++) {
19294 switch (opcode[i]) {
19295diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19296index 0b0cb5f..db6b9ed 100644
19297--- a/arch/x86/kernel/sys_i386_32.c
19298+++ b/arch/x86/kernel/sys_i386_32.c
19299@@ -24,17 +24,224 @@
19300
19301 #include <asm/syscalls.h>
19302
19303-/*
19304- * Do a system call from kernel instead of calling sys_execve so we
19305- * end up with proper pt_regs.
19306- */
19307-int kernel_execve(const char *filename,
19308- const char *const argv[],
19309- const char *const envp[])
19310+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19311 {
19312- long __res;
19313- asm volatile ("int $0x80"
19314- : "=a" (__res)
19315- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
19316- return __res;
19317+ unsigned long pax_task_size = TASK_SIZE;
19318+
19319+#ifdef CONFIG_PAX_SEGMEXEC
19320+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19321+ pax_task_size = SEGMEXEC_TASK_SIZE;
19322+#endif
19323+
19324+ if (len > pax_task_size || addr > pax_task_size - len)
19325+ return -EINVAL;
19326+
19327+ return 0;
19328+}
19329+
19330+unsigned long
19331+arch_get_unmapped_area(struct file *filp, unsigned long addr,
19332+ unsigned long len, unsigned long pgoff, unsigned long flags)
19333+{
19334+ struct mm_struct *mm = current->mm;
19335+ struct vm_area_struct *vma;
19336+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19337+
19338+#ifdef CONFIG_PAX_SEGMEXEC
19339+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19340+ pax_task_size = SEGMEXEC_TASK_SIZE;
19341+#endif
19342+
19343+ pax_task_size -= PAGE_SIZE;
19344+
19345+ if (len > pax_task_size)
19346+ return -ENOMEM;
19347+
19348+ if (flags & MAP_FIXED)
19349+ return addr;
19350+
19351+#ifdef CONFIG_PAX_RANDMMAP
19352+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19353+#endif
19354+
19355+ if (addr) {
19356+ addr = PAGE_ALIGN(addr);
19357+ if (pax_task_size - len >= addr) {
19358+ vma = find_vma(mm, addr);
19359+ if (check_heap_stack_gap(vma, addr, len))
19360+ return addr;
19361+ }
19362+ }
19363+ if (len > mm->cached_hole_size) {
19364+ start_addr = addr = mm->free_area_cache;
19365+ } else {
19366+ start_addr = addr = mm->mmap_base;
19367+ mm->cached_hole_size = 0;
19368+ }
19369+
19370+#ifdef CONFIG_PAX_PAGEEXEC
19371+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19372+ start_addr = 0x00110000UL;
19373+
19374+#ifdef CONFIG_PAX_RANDMMAP
19375+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19376+ start_addr += mm->delta_mmap & 0x03FFF000UL;
19377+#endif
19378+
19379+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19380+ start_addr = addr = mm->mmap_base;
19381+ else
19382+ addr = start_addr;
19383+ }
19384+#endif
19385+
19386+full_search:
19387+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19388+ /* At this point: (!vma || addr < vma->vm_end). */
19389+ if (pax_task_size - len < addr) {
19390+ /*
19391+ * Start a new search - just in case we missed
19392+ * some holes.
19393+ */
19394+ if (start_addr != mm->mmap_base) {
19395+ start_addr = addr = mm->mmap_base;
19396+ mm->cached_hole_size = 0;
19397+ goto full_search;
19398+ }
19399+ return -ENOMEM;
19400+ }
19401+ if (check_heap_stack_gap(vma, addr, len))
19402+ break;
19403+ if (addr + mm->cached_hole_size < vma->vm_start)
19404+ mm->cached_hole_size = vma->vm_start - addr;
19405+ addr = vma->vm_end;
19406+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
19407+ start_addr = addr = mm->mmap_base;
19408+ mm->cached_hole_size = 0;
19409+ goto full_search;
19410+ }
19411+ }
19412+
19413+ /*
19414+ * Remember the place where we stopped the search:
19415+ */
19416+ mm->free_area_cache = addr + len;
19417+ return addr;
19418+}
19419+
19420+unsigned long
19421+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19422+ const unsigned long len, const unsigned long pgoff,
19423+ const unsigned long flags)
19424+{
19425+ struct vm_area_struct *vma;
19426+ struct mm_struct *mm = current->mm;
19427+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19428+
19429+#ifdef CONFIG_PAX_SEGMEXEC
19430+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19431+ pax_task_size = SEGMEXEC_TASK_SIZE;
19432+#endif
19433+
19434+ pax_task_size -= PAGE_SIZE;
19435+
19436+ /* requested length too big for entire address space */
19437+ if (len > pax_task_size)
19438+ return -ENOMEM;
19439+
19440+ if (flags & MAP_FIXED)
19441+ return addr;
19442+
19443+#ifdef CONFIG_PAX_PAGEEXEC
19444+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19445+ goto bottomup;
19446+#endif
19447+
19448+#ifdef CONFIG_PAX_RANDMMAP
19449+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19450+#endif
19451+
19452+ /* requesting a specific address */
19453+ if (addr) {
19454+ addr = PAGE_ALIGN(addr);
19455+ if (pax_task_size - len >= addr) {
19456+ vma = find_vma(mm, addr);
19457+ if (check_heap_stack_gap(vma, addr, len))
19458+ return addr;
19459+ }
19460+ }
19461+
19462+ /* check if free_area_cache is useful for us */
19463+ if (len <= mm->cached_hole_size) {
19464+ mm->cached_hole_size = 0;
19465+ mm->free_area_cache = mm->mmap_base;
19466+ }
19467+
19468+ /* either no address requested or can't fit in requested address hole */
19469+ addr = mm->free_area_cache;
19470+
19471+ /* make sure it can fit in the remaining address space */
19472+ if (addr > len) {
19473+ vma = find_vma(mm, addr-len);
19474+ if (check_heap_stack_gap(vma, addr - len, len))
19475+ /* remember the address as a hint for next time */
19476+ return (mm->free_area_cache = addr-len);
19477+ }
19478+
19479+ if (mm->mmap_base < len)
19480+ goto bottomup;
19481+
19482+ addr = mm->mmap_base-len;
19483+
19484+ do {
19485+ /*
19486+ * Lookup failure means no vma is above this address,
19487+ * else if new region fits below vma->vm_start,
19488+ * return with success:
19489+ */
19490+ vma = find_vma(mm, addr);
19491+ if (check_heap_stack_gap(vma, addr, len))
19492+ /* remember the address as a hint for next time */
19493+ return (mm->free_area_cache = addr);
19494+
19495+ /* remember the largest hole we saw so far */
19496+ if (addr + mm->cached_hole_size < vma->vm_start)
19497+ mm->cached_hole_size = vma->vm_start - addr;
19498+
19499+ /* try just below the current vma->vm_start */
19500+ addr = skip_heap_stack_gap(vma, len);
19501+ } while (!IS_ERR_VALUE(addr));
19502+
19503+bottomup:
19504+ /*
19505+ * A failed mmap() very likely causes application failure,
19506+ * so fall back to the bottom-up function here. This scenario
19507+ * can happen with large stack limits and large mmap()
19508+ * allocations.
19509+ */
19510+
19511+#ifdef CONFIG_PAX_SEGMEXEC
19512+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19513+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19514+ else
19515+#endif
19516+
19517+ mm->mmap_base = TASK_UNMAPPED_BASE;
19518+
19519+#ifdef CONFIG_PAX_RANDMMAP
19520+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19521+ mm->mmap_base += mm->delta_mmap;
19522+#endif
19523+
19524+ mm->free_area_cache = mm->mmap_base;
19525+ mm->cached_hole_size = ~0UL;
19526+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19527+ /*
19528+ * Restore the topdown base:
19529+ */
19530+ mm->mmap_base = base;
19531+ mm->free_area_cache = base;
19532+ mm->cached_hole_size = ~0UL;
19533+
19534+ return addr;
19535 }
19536diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19537index b4d3c39..82bb73b 100644
19538--- a/arch/x86/kernel/sys_x86_64.c
19539+++ b/arch/x86/kernel/sys_x86_64.c
19540@@ -95,8 +95,8 @@ out:
19541 return error;
19542 }
19543
19544-static void find_start_end(unsigned long flags, unsigned long *begin,
19545- unsigned long *end)
19546+static void find_start_end(struct mm_struct *mm, unsigned long flags,
19547+ unsigned long *begin, unsigned long *end)
19548 {
19549 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
19550 unsigned long new_begin;
19551@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19552 *begin = new_begin;
19553 }
19554 } else {
19555- *begin = TASK_UNMAPPED_BASE;
19556+ *begin = mm->mmap_base;
19557 *end = TASK_SIZE;
19558 }
19559 }
19560@@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19561 if (flags & MAP_FIXED)
19562 return addr;
19563
19564- find_start_end(flags, &begin, &end);
19565+ find_start_end(mm, flags, &begin, &end);
19566
19567 if (len > end)
19568 return -ENOMEM;
19569
19570+#ifdef CONFIG_PAX_RANDMMAP
19571+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19572+#endif
19573+
19574 if (addr) {
19575 addr = PAGE_ALIGN(addr);
19576 vma = find_vma(mm, addr);
19577- if (end - len >= addr &&
19578- (!vma || addr + len <= vma->vm_start))
19579+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19580 return addr;
19581 }
19582 if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
19583@@ -172,7 +175,7 @@ full_search:
19584 }
19585 return -ENOMEM;
19586 }
19587- if (!vma || addr + len <= vma->vm_start) {
19588+ if (check_heap_stack_gap(vma, addr, len)) {
19589 /*
19590 * Remember the place where we stopped the search:
19591 */
19592@@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19593 {
19594 struct vm_area_struct *vma;
19595 struct mm_struct *mm = current->mm;
19596- unsigned long addr = addr0, start_addr;
19597+ unsigned long base = mm->mmap_base, addr = addr0, start_addr;
19598
19599 /* requested length too big for entire address space */
19600 if (len > TASK_SIZE)
19601@@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19602 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
19603 goto bottomup;
19604
19605+#ifdef CONFIG_PAX_RANDMMAP
19606+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19607+#endif
19608+
19609 /* requesting a specific address */
19610 if (addr) {
19611 addr = PAGE_ALIGN(addr);
19612- vma = find_vma(mm, addr);
19613- if (TASK_SIZE - len >= addr &&
19614- (!vma || addr + len <= vma->vm_start))
19615- return addr;
19616+ if (TASK_SIZE - len >= addr) {
19617+ vma = find_vma(mm, addr);
19618+ if (check_heap_stack_gap(vma, addr, len))
19619+ return addr;
19620+ }
19621 }
19622
19623 /* check if free_area_cache is useful for us */
19624@@ -240,7 +248,7 @@ try_again:
19625 * return with success:
19626 */
19627 vma = find_vma(mm, addr);
19628- if (!vma || addr+len <= vma->vm_start)
19629+ if (check_heap_stack_gap(vma, addr, len))
19630 /* remember the address as a hint for next time */
19631 return mm->free_area_cache = addr;
19632
19633@@ -249,8 +257,8 @@ try_again:
19634 mm->cached_hole_size = vma->vm_start - addr;
19635
19636 /* try just below the current vma->vm_start */
19637- addr = vma->vm_start-len;
19638- } while (len < vma->vm_start);
19639+ addr = skip_heap_stack_gap(vma, len);
19640+ } while (!IS_ERR_VALUE(addr));
19641
19642 fail:
19643 /*
19644@@ -270,13 +278,21 @@ bottomup:
19645 * can happen with large stack limits and large mmap()
19646 * allocations.
19647 */
19648+ mm->mmap_base = TASK_UNMAPPED_BASE;
19649+
19650+#ifdef CONFIG_PAX_RANDMMAP
19651+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19652+ mm->mmap_base += mm->delta_mmap;
19653+#endif
19654+
19655+ mm->free_area_cache = mm->mmap_base;
19656 mm->cached_hole_size = ~0UL;
19657- mm->free_area_cache = TASK_UNMAPPED_BASE;
19658 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19659 /*
19660 * Restore the topdown base:
19661 */
19662- mm->free_area_cache = mm->mmap_base;
19663+ mm->mmap_base = base;
19664+ mm->free_area_cache = base;
19665 mm->cached_hole_size = ~0UL;
19666
19667 return addr;
19668diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19669index 6410744..79758f0 100644
19670--- a/arch/x86/kernel/tboot.c
19671+++ b/arch/x86/kernel/tboot.c
19672@@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
19673
19674 void tboot_shutdown(u32 shutdown_type)
19675 {
19676- void (*shutdown)(void);
19677+ void (* __noreturn shutdown)(void);
19678
19679 if (!tboot_enabled())
19680 return;
19681@@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
19682
19683 switch_to_tboot_pt();
19684
19685- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19686+ shutdown = (void *)tboot->shutdown_entry;
19687 shutdown();
19688
19689 /* should not reach here */
19690@@ -299,7 +299,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19691 return 0;
19692 }
19693
19694-static atomic_t ap_wfs_count;
19695+static atomic_unchecked_t ap_wfs_count;
19696
19697 static int tboot_wait_for_aps(int num_aps)
19698 {
19699@@ -323,9 +323,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19700 {
19701 switch (action) {
19702 case CPU_DYING:
19703- atomic_inc(&ap_wfs_count);
19704+ atomic_inc_unchecked(&ap_wfs_count);
19705 if (num_online_cpus() == 1)
19706- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19707+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19708 return NOTIFY_BAD;
19709 break;
19710 }
19711@@ -344,7 +344,7 @@ static __init int tboot_late_init(void)
19712
19713 tboot_create_trampoline();
19714
19715- atomic_set(&ap_wfs_count, 0);
19716+ atomic_set_unchecked(&ap_wfs_count, 0);
19717 register_hotcpu_notifier(&tboot_cpu_notifier);
19718
19719 acpi_os_set_prepare_sleep(&tboot_sleep);
19720diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19721index c6eba2b..3303326 100644
19722--- a/arch/x86/kernel/time.c
19723+++ b/arch/x86/kernel/time.c
19724@@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
19725 {
19726 unsigned long pc = instruction_pointer(regs);
19727
19728- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19729+ if (!user_mode(regs) && in_lock_functions(pc)) {
19730 #ifdef CONFIG_FRAME_POINTER
19731- return *(unsigned long *)(regs->bp + sizeof(long));
19732+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19733 #else
19734 unsigned long *sp =
19735 (unsigned long *)kernel_stack_pointer(regs);
19736@@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19737 * or above a saved flags. Eflags has bits 22-31 zero,
19738 * kernel addresses don't.
19739 */
19740+
19741+#ifdef CONFIG_PAX_KERNEXEC
19742+ return ktla_ktva(sp[0]);
19743+#else
19744 if (sp[0] >> 22)
19745 return sp[0];
19746 if (sp[1] >> 22)
19747 return sp[1];
19748 #endif
19749+
19750+#endif
19751 }
19752 return pc;
19753 }
19754diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19755index 9d9d2f9..ed344e4 100644
19756--- a/arch/x86/kernel/tls.c
19757+++ b/arch/x86/kernel/tls.c
19758@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19759 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19760 return -EINVAL;
19761
19762+#ifdef CONFIG_PAX_SEGMEXEC
19763+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19764+ return -EINVAL;
19765+#endif
19766+
19767 set_tls_desc(p, idx, &info, 1);
19768
19769 return 0;
19770diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19771index 451c0a7..e57f551 100644
19772--- a/arch/x86/kernel/trampoline_32.S
19773+++ b/arch/x86/kernel/trampoline_32.S
19774@@ -32,6 +32,12 @@
19775 #include <asm/segment.h>
19776 #include <asm/page_types.h>
19777
19778+#ifdef CONFIG_PAX_KERNEXEC
19779+#define ta(X) (X)
19780+#else
19781+#define ta(X) ((X) - __PAGE_OFFSET)
19782+#endif
19783+
19784 #ifdef CONFIG_SMP
19785
19786 .section ".x86_trampoline","a"
19787@@ -62,7 +68,7 @@ r_base = .
19788 inc %ax # protected mode (PE) bit
19789 lmsw %ax # into protected mode
19790 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19791- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19792+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
19793
19794 # These need to be in the same 64K segment as the above;
19795 # hence we don't use the boot_gdt_descr defined in head.S
19796diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19797index 09ff517..df19fbff 100644
19798--- a/arch/x86/kernel/trampoline_64.S
19799+++ b/arch/x86/kernel/trampoline_64.S
19800@@ -90,7 +90,7 @@ startup_32:
19801 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19802 movl %eax, %ds
19803
19804- movl $X86_CR4_PAE, %eax
19805+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19806 movl %eax, %cr4 # Enable PAE mode
19807
19808 # Setup trampoline 4 level pagetables
19809@@ -138,7 +138,7 @@ tidt:
19810 # so the kernel can live anywhere
19811 .balign 4
19812 tgdt:
19813- .short tgdt_end - tgdt # gdt limit
19814+ .short tgdt_end - tgdt - 1 # gdt limit
19815 .long tgdt - r_base
19816 .short 0
19817 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19818diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19819index ff9281f1..30cb4ac 100644
19820--- a/arch/x86/kernel/traps.c
19821+++ b/arch/x86/kernel/traps.c
19822@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
19823
19824 /* Do we ignore FPU interrupts ? */
19825 char ignore_fpu_irq;
19826-
19827-/*
19828- * The IDT has to be page-aligned to simplify the Pentium
19829- * F0 0F bug workaround.
19830- */
19831-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19832 #endif
19833
19834 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19835@@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19836 }
19837
19838 static void __kprobes
19839-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19840+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19841 long error_code, siginfo_t *info)
19842 {
19843 struct task_struct *tsk = current;
19844
19845 #ifdef CONFIG_X86_32
19846- if (regs->flags & X86_VM_MASK) {
19847+ if (v8086_mode(regs)) {
19848 /*
19849 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19850 * On nmi (interrupt 2), do_trap should not be called.
19851@@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19852 }
19853 #endif
19854
19855- if (!user_mode(regs))
19856+ if (!user_mode_novm(regs))
19857 goto kernel_trap;
19858
19859 #ifdef CONFIG_X86_32
19860@@ -148,7 +142,7 @@ trap_signal:
19861 printk_ratelimit()) {
19862 printk(KERN_INFO
19863 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19864- tsk->comm, tsk->pid, str,
19865+ tsk->comm, task_pid_nr(tsk), str,
19866 regs->ip, regs->sp, error_code);
19867 print_vma_addr(" in ", regs->ip);
19868 printk("\n");
19869@@ -165,8 +159,20 @@ kernel_trap:
19870 if (!fixup_exception(regs)) {
19871 tsk->thread.error_code = error_code;
19872 tsk->thread.trap_nr = trapnr;
19873+
19874+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19875+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19876+ str = "PAX: suspicious stack segment fault";
19877+#endif
19878+
19879 die(str, regs, error_code);
19880 }
19881+
19882+#ifdef CONFIG_PAX_REFCOUNT
19883+ if (trapnr == 4)
19884+ pax_report_refcount_overflow(regs);
19885+#endif
19886+
19887 return;
19888
19889 #ifdef CONFIG_X86_32
19890@@ -259,14 +265,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
19891 conditional_sti(regs);
19892
19893 #ifdef CONFIG_X86_32
19894- if (regs->flags & X86_VM_MASK)
19895+ if (v8086_mode(regs))
19896 goto gp_in_vm86;
19897 #endif
19898
19899 tsk = current;
19900- if (!user_mode(regs))
19901+ if (!user_mode_novm(regs))
19902 goto gp_in_kernel;
19903
19904+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19905+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
19906+ struct mm_struct *mm = tsk->mm;
19907+ unsigned long limit;
19908+
19909+ down_write(&mm->mmap_sem);
19910+ limit = mm->context.user_cs_limit;
19911+ if (limit < TASK_SIZE) {
19912+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
19913+ up_write(&mm->mmap_sem);
19914+ return;
19915+ }
19916+ up_write(&mm->mmap_sem);
19917+ }
19918+#endif
19919+
19920 tsk->thread.error_code = error_code;
19921 tsk->thread.trap_nr = X86_TRAP_GP;
19922
19923@@ -299,6 +321,13 @@ gp_in_kernel:
19924 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
19925 X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
19926 return;
19927+
19928+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19929+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
19930+ die("PAX: suspicious general protection fault", regs, error_code);
19931+ else
19932+#endif
19933+
19934 die("general protection fault", regs, error_code);
19935 }
19936
19937@@ -425,7 +454,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19938 /* It's safe to allow irq's after DR6 has been saved */
19939 preempt_conditional_sti(regs);
19940
19941- if (regs->flags & X86_VM_MASK) {
19942+ if (v8086_mode(regs)) {
19943 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
19944 X86_TRAP_DB);
19945 preempt_conditional_cli(regs);
19946@@ -440,7 +469,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19947 * We already checked v86 mode above, so we can check for kernel mode
19948 * by just checking the CPL of CS.
19949 */
19950- if ((dr6 & DR_STEP) && !user_mode(regs)) {
19951+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
19952 tsk->thread.debugreg6 &= ~DR_STEP;
19953 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
19954 regs->flags &= ~X86_EFLAGS_TF;
19955@@ -471,7 +500,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
19956 return;
19957 conditional_sti(regs);
19958
19959- if (!user_mode_vm(regs))
19960+ if (!user_mode(regs))
19961 {
19962 if (!fixup_exception(regs)) {
19963 task->thread.error_code = error_code;
19964diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
19965index b9242ba..50c5edd 100644
19966--- a/arch/x86/kernel/verify_cpu.S
19967+++ b/arch/x86/kernel/verify_cpu.S
19968@@ -20,6 +20,7 @@
19969 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
19970 * arch/x86/kernel/trampoline_64.S: secondary processor verification
19971 * arch/x86/kernel/head_32.S: processor startup
19972+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
19973 *
19974 * verify_cpu, returns the status of longmode and SSE in register %eax.
19975 * 0: Success 1: Failure
19976diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
19977index 255f58a..5e91150 100644
19978--- a/arch/x86/kernel/vm86_32.c
19979+++ b/arch/x86/kernel/vm86_32.c
19980@@ -41,6 +41,7 @@
19981 #include <linux/ptrace.h>
19982 #include <linux/audit.h>
19983 #include <linux/stddef.h>
19984+#include <linux/grsecurity.h>
19985
19986 #include <asm/uaccess.h>
19987 #include <asm/io.h>
19988@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
19989 do_exit(SIGSEGV);
19990 }
19991
19992- tss = &per_cpu(init_tss, get_cpu());
19993+ tss = init_tss + get_cpu();
19994 current->thread.sp0 = current->thread.saved_sp0;
19995 current->thread.sysenter_cs = __KERNEL_CS;
19996 load_sp0(tss, &current->thread);
19997@@ -210,6 +211,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
19998 struct task_struct *tsk;
19999 int tmp, ret = -EPERM;
20000
20001+#ifdef CONFIG_GRKERNSEC_VM86
20002+ if (!capable(CAP_SYS_RAWIO)) {
20003+ gr_handle_vm86();
20004+ goto out;
20005+ }
20006+#endif
20007+
20008 tsk = current;
20009 if (tsk->thread.saved_sp0)
20010 goto out;
20011@@ -240,6 +248,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
20012 int tmp, ret;
20013 struct vm86plus_struct __user *v86;
20014
20015+#ifdef CONFIG_GRKERNSEC_VM86
20016+ if (!capable(CAP_SYS_RAWIO)) {
20017+ gr_handle_vm86();
20018+ ret = -EPERM;
20019+ goto out;
20020+ }
20021+#endif
20022+
20023 tsk = current;
20024 switch (cmd) {
20025 case VM86_REQUEST_IRQ:
20026@@ -326,7 +342,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
20027 tsk->thread.saved_fs = info->regs32->fs;
20028 tsk->thread.saved_gs = get_user_gs(info->regs32);
20029
20030- tss = &per_cpu(init_tss, get_cpu());
20031+ tss = init_tss + get_cpu();
20032 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20033 if (cpu_has_sep)
20034 tsk->thread.sysenter_cs = 0;
20035@@ -533,7 +549,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
20036 goto cannot_handle;
20037 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20038 goto cannot_handle;
20039- intr_ptr = (unsigned long __user *) (i << 2);
20040+ intr_ptr = (__force unsigned long __user *) (i << 2);
20041 if (get_user(segoffs, intr_ptr))
20042 goto cannot_handle;
20043 if ((segoffs >> 16) == BIOSSEG)
20044diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20045index 0f703f1..9e15f64 100644
20046--- a/arch/x86/kernel/vmlinux.lds.S
20047+++ b/arch/x86/kernel/vmlinux.lds.S
20048@@ -26,6 +26,13 @@
20049 #include <asm/page_types.h>
20050 #include <asm/cache.h>
20051 #include <asm/boot.h>
20052+#include <asm/segment.h>
20053+
20054+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20055+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20056+#else
20057+#define __KERNEL_TEXT_OFFSET 0
20058+#endif
20059
20060 #undef i386 /* in case the preprocessor is a 32bit one */
20061
20062@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
20063
20064 PHDRS {
20065 text PT_LOAD FLAGS(5); /* R_E */
20066+#ifdef CONFIG_X86_32
20067+ module PT_LOAD FLAGS(5); /* R_E */
20068+#endif
20069+#ifdef CONFIG_XEN
20070+ rodata PT_LOAD FLAGS(5); /* R_E */
20071+#else
20072+ rodata PT_LOAD FLAGS(4); /* R__ */
20073+#endif
20074 data PT_LOAD FLAGS(6); /* RW_ */
20075-#ifdef CONFIG_X86_64
20076+ init.begin PT_LOAD FLAGS(6); /* RW_ */
20077 #ifdef CONFIG_SMP
20078 percpu PT_LOAD FLAGS(6); /* RW_ */
20079 #endif
20080+ text.init PT_LOAD FLAGS(5); /* R_E */
20081+ text.exit PT_LOAD FLAGS(5); /* R_E */
20082 init PT_LOAD FLAGS(7); /* RWE */
20083-#endif
20084 note PT_NOTE FLAGS(0); /* ___ */
20085 }
20086
20087 SECTIONS
20088 {
20089 #ifdef CONFIG_X86_32
20090- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20091- phys_startup_32 = startup_32 - LOAD_OFFSET;
20092+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20093 #else
20094- . = __START_KERNEL;
20095- phys_startup_64 = startup_64 - LOAD_OFFSET;
20096+ . = __START_KERNEL;
20097 #endif
20098
20099 /* Text and read-only data */
20100- .text : AT(ADDR(.text) - LOAD_OFFSET) {
20101- _text = .;
20102+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20103 /* bootstrapping code */
20104+#ifdef CONFIG_X86_32
20105+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20106+#else
20107+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20108+#endif
20109+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20110+ _text = .;
20111 HEAD_TEXT
20112 #ifdef CONFIG_X86_32
20113 . = ALIGN(PAGE_SIZE);
20114@@ -108,13 +128,47 @@ SECTIONS
20115 IRQENTRY_TEXT
20116 *(.fixup)
20117 *(.gnu.warning)
20118- /* End of text section */
20119- _etext = .;
20120 } :text = 0x9090
20121
20122- NOTES :text :note
20123+ . += __KERNEL_TEXT_OFFSET;
20124
20125- EXCEPTION_TABLE(16) :text = 0x9090
20126+#ifdef CONFIG_X86_32
20127+ . = ALIGN(PAGE_SIZE);
20128+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
20129+
20130+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20131+ MODULES_EXEC_VADDR = .;
20132+ BYTE(0)
20133+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20134+ . = ALIGN(HPAGE_SIZE);
20135+ MODULES_EXEC_END = . - 1;
20136+#endif
20137+
20138+ } :module
20139+#endif
20140+
20141+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20142+ /* End of text section */
20143+ _etext = . - __KERNEL_TEXT_OFFSET;
20144+ }
20145+
20146+#ifdef CONFIG_X86_32
20147+ . = ALIGN(PAGE_SIZE);
20148+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20149+ *(.idt)
20150+ . = ALIGN(PAGE_SIZE);
20151+ *(.empty_zero_page)
20152+ *(.initial_pg_fixmap)
20153+ *(.initial_pg_pmd)
20154+ *(.initial_page_table)
20155+ *(.swapper_pg_dir)
20156+ } :rodata
20157+#endif
20158+
20159+ . = ALIGN(PAGE_SIZE);
20160+ NOTES :rodata :note
20161+
20162+ EXCEPTION_TABLE(16) :rodata
20163
20164 #if defined(CONFIG_DEBUG_RODATA)
20165 /* .text should occupy whole number of pages */
20166@@ -126,16 +180,20 @@ SECTIONS
20167
20168 /* Data */
20169 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20170+
20171+#ifdef CONFIG_PAX_KERNEXEC
20172+ . = ALIGN(HPAGE_SIZE);
20173+#else
20174+ . = ALIGN(PAGE_SIZE);
20175+#endif
20176+
20177 /* Start of data section */
20178 _sdata = .;
20179
20180 /* init_task */
20181 INIT_TASK_DATA(THREAD_SIZE)
20182
20183-#ifdef CONFIG_X86_32
20184- /* 32 bit has nosave before _edata */
20185 NOSAVE_DATA
20186-#endif
20187
20188 PAGE_ALIGNED_DATA(PAGE_SIZE)
20189
20190@@ -176,12 +234,19 @@ SECTIONS
20191 #endif /* CONFIG_X86_64 */
20192
20193 /* Init code and data - will be freed after init */
20194- . = ALIGN(PAGE_SIZE);
20195 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20196+ BYTE(0)
20197+
20198+#ifdef CONFIG_PAX_KERNEXEC
20199+ . = ALIGN(HPAGE_SIZE);
20200+#else
20201+ . = ALIGN(PAGE_SIZE);
20202+#endif
20203+
20204 __init_begin = .; /* paired with __init_end */
20205- }
20206+ } :init.begin
20207
20208-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20209+#ifdef CONFIG_SMP
20210 /*
20211 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20212 * output PHDR, so the next output section - .init.text - should
20213@@ -190,12 +255,27 @@ SECTIONS
20214 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
20215 #endif
20216
20217- INIT_TEXT_SECTION(PAGE_SIZE)
20218-#ifdef CONFIG_X86_64
20219- :init
20220-#endif
20221+ . = ALIGN(PAGE_SIZE);
20222+ init_begin = .;
20223+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20224+ VMLINUX_SYMBOL(_sinittext) = .;
20225+ INIT_TEXT
20226+ VMLINUX_SYMBOL(_einittext) = .;
20227+ . = ALIGN(PAGE_SIZE);
20228+ } :text.init
20229
20230- INIT_DATA_SECTION(16)
20231+ /*
20232+ * .exit.text is discard at runtime, not link time, to deal with
20233+ * references from .altinstructions and .eh_frame
20234+ */
20235+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20236+ EXIT_TEXT
20237+ . = ALIGN(16);
20238+ } :text.exit
20239+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20240+
20241+ . = ALIGN(PAGE_SIZE);
20242+ INIT_DATA_SECTION(16) :init
20243
20244 /*
20245 * Code and data for a variety of lowlevel trampolines, to be
20246@@ -269,19 +349,12 @@ SECTIONS
20247 }
20248
20249 . = ALIGN(8);
20250- /*
20251- * .exit.text is discard at runtime, not link time, to deal with
20252- * references from .altinstructions and .eh_frame
20253- */
20254- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20255- EXIT_TEXT
20256- }
20257
20258 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20259 EXIT_DATA
20260 }
20261
20262-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20263+#ifndef CONFIG_SMP
20264 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
20265 #endif
20266
20267@@ -300,16 +373,10 @@ SECTIONS
20268 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
20269 __smp_locks = .;
20270 *(.smp_locks)
20271- . = ALIGN(PAGE_SIZE);
20272 __smp_locks_end = .;
20273+ . = ALIGN(PAGE_SIZE);
20274 }
20275
20276-#ifdef CONFIG_X86_64
20277- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20278- NOSAVE_DATA
20279- }
20280-#endif
20281-
20282 /* BSS */
20283 . = ALIGN(PAGE_SIZE);
20284 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20285@@ -325,6 +392,7 @@ SECTIONS
20286 __brk_base = .;
20287 . += 64 * 1024; /* 64k alignment slop space */
20288 *(.brk_reservation) /* areas brk users have reserved */
20289+ . = ALIGN(HPAGE_SIZE);
20290 __brk_limit = .;
20291 }
20292
20293@@ -351,13 +419,12 @@ SECTIONS
20294 * for the boot processor.
20295 */
20296 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
20297-INIT_PER_CPU(gdt_page);
20298 INIT_PER_CPU(irq_stack_union);
20299
20300 /*
20301 * Build-time check on the image size:
20302 */
20303-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20304+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20305 "kernel image bigger than KERNEL_IMAGE_SIZE");
20306
20307 #ifdef CONFIG_SMP
20308diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20309index 7515cf0..331a1a0 100644
20310--- a/arch/x86/kernel/vsyscall_64.c
20311+++ b/arch/x86/kernel/vsyscall_64.c
20312@@ -54,15 +54,13 @@
20313 DEFINE_VVAR(int, vgetcpu_mode);
20314 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
20315
20316-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
20317+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
20318
20319 static int __init vsyscall_setup(char *str)
20320 {
20321 if (str) {
20322 if (!strcmp("emulate", str))
20323 vsyscall_mode = EMULATE;
20324- else if (!strcmp("native", str))
20325- vsyscall_mode = NATIVE;
20326 else if (!strcmp("none", str))
20327 vsyscall_mode = NONE;
20328 else
20329@@ -206,7 +204,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20330
20331 tsk = current;
20332 if (seccomp_mode(&tsk->seccomp))
20333- do_exit(SIGKILL);
20334+ do_group_exit(SIGKILL);
20335
20336 /*
20337 * With a real vsyscall, page faults cause SIGSEGV. We want to
20338@@ -278,8 +276,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20339 return true;
20340
20341 sigsegv:
20342- force_sig(SIGSEGV, current);
20343- return true;
20344+ do_group_exit(SIGKILL);
20345 }
20346
20347 /*
20348@@ -332,10 +329,7 @@ void __init map_vsyscall(void)
20349 extern char __vvar_page;
20350 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
20351
20352- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
20353- vsyscall_mode == NATIVE
20354- ? PAGE_KERNEL_VSYSCALL
20355- : PAGE_KERNEL_VVAR);
20356+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
20357 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
20358 (unsigned long)VSYSCALL_START);
20359
20360diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
20361index 9796c2f..f686fbf 100644
20362--- a/arch/x86/kernel/x8664_ksyms_64.c
20363+++ b/arch/x86/kernel/x8664_ksyms_64.c
20364@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
20365 EXPORT_SYMBOL(copy_user_generic_string);
20366 EXPORT_SYMBOL(copy_user_generic_unrolled);
20367 EXPORT_SYMBOL(__copy_user_nocache);
20368-EXPORT_SYMBOL(_copy_from_user);
20369-EXPORT_SYMBOL(_copy_to_user);
20370
20371 EXPORT_SYMBOL(copy_page);
20372 EXPORT_SYMBOL(clear_page);
20373diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
20374index e62728e..5fc3a07 100644
20375--- a/arch/x86/kernel/xsave.c
20376+++ b/arch/x86/kernel/xsave.c
20377@@ -131,7 +131,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
20378 fx_sw_user->xstate_size > fx_sw_user->extended_size)
20379 return -EINVAL;
20380
20381- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
20382+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
20383 fx_sw_user->extended_size -
20384 FP_XSTATE_MAGIC2_SIZE));
20385 if (err)
20386@@ -267,7 +267,7 @@ fx_only:
20387 * the other extended state.
20388 */
20389 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
20390- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
20391+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
20392 }
20393
20394 /*
20395@@ -296,7 +296,7 @@ int restore_i387_xstate(void __user *buf)
20396 if (use_xsave())
20397 err = restore_user_xstate(buf);
20398 else
20399- err = fxrstor_checking((__force struct i387_fxsave_struct *)
20400+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
20401 buf);
20402 if (unlikely(err)) {
20403 /*
20404diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
20405index 9fed5be..18fd595 100644
20406--- a/arch/x86/kvm/cpuid.c
20407+++ b/arch/x86/kvm/cpuid.c
20408@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
20409 struct kvm_cpuid2 *cpuid,
20410 struct kvm_cpuid_entry2 __user *entries)
20411 {
20412- int r;
20413+ int r, i;
20414
20415 r = -E2BIG;
20416 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
20417 goto out;
20418 r = -EFAULT;
20419- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
20420- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20421+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20422 goto out;
20423+ for (i = 0; i < cpuid->nent; ++i) {
20424+ struct kvm_cpuid_entry2 cpuid_entry;
20425+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
20426+ goto out;
20427+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
20428+ }
20429 vcpu->arch.cpuid_nent = cpuid->nent;
20430 kvm_apic_set_version(vcpu);
20431 kvm_x86_ops->cpuid_update(vcpu);
20432@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
20433 struct kvm_cpuid2 *cpuid,
20434 struct kvm_cpuid_entry2 __user *entries)
20435 {
20436- int r;
20437+ int r, i;
20438
20439 r = -E2BIG;
20440 if (cpuid->nent < vcpu->arch.cpuid_nent)
20441 goto out;
20442 r = -EFAULT;
20443- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
20444- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20445+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20446 goto out;
20447+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
20448+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
20449+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
20450+ goto out;
20451+ }
20452 return 0;
20453
20454 out:
20455diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
20456index 8375622..b7bca1a 100644
20457--- a/arch/x86/kvm/emulate.c
20458+++ b/arch/x86/kvm/emulate.c
20459@@ -252,6 +252,7 @@ struct gprefix {
20460
20461 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
20462 do { \
20463+ unsigned long _tmp; \
20464 __asm__ __volatile__ ( \
20465 _PRE_EFLAGS("0", "4", "2") \
20466 _op _suffix " %"_x"3,%1; " \
20467@@ -266,8 +267,6 @@ struct gprefix {
20468 /* Raw emulation: instruction has two explicit operands. */
20469 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
20470 do { \
20471- unsigned long _tmp; \
20472- \
20473 switch ((ctxt)->dst.bytes) { \
20474 case 2: \
20475 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
20476@@ -283,7 +282,6 @@ struct gprefix {
20477
20478 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
20479 do { \
20480- unsigned long _tmp; \
20481 switch ((ctxt)->dst.bytes) { \
20482 case 1: \
20483 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
20484diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
20485index 8584322..17d5955 100644
20486--- a/arch/x86/kvm/lapic.c
20487+++ b/arch/x86/kvm/lapic.c
20488@@ -54,7 +54,7 @@
20489 #define APIC_BUS_CYCLE_NS 1
20490
20491 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
20492-#define apic_debug(fmt, arg...)
20493+#define apic_debug(fmt, arg...) do {} while (0)
20494
20495 #define APIC_LVT_NUM 6
20496 /* 14 is the version for Xeon and Pentium 8.4.8*/
20497diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
20498index df5a703..63748a7 100644
20499--- a/arch/x86/kvm/paging_tmpl.h
20500+++ b/arch/x86/kvm/paging_tmpl.h
20501@@ -197,7 +197,7 @@ retry_walk:
20502 if (unlikely(kvm_is_error_hva(host_addr)))
20503 goto error;
20504
20505- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
20506+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
20507 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
20508 goto error;
20509
20510diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
20511index e334389..6839087 100644
20512--- a/arch/x86/kvm/svm.c
20513+++ b/arch/x86/kvm/svm.c
20514@@ -3509,7 +3509,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
20515 int cpu = raw_smp_processor_id();
20516
20517 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
20518+
20519+ pax_open_kernel();
20520 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
20521+ pax_close_kernel();
20522+
20523 load_TR_desc();
20524 }
20525
20526@@ -3887,6 +3891,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
20527 #endif
20528 #endif
20529
20530+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20531+ __set_fs(current_thread_info()->addr_limit);
20532+#endif
20533+
20534 reload_tss(vcpu);
20535
20536 local_irq_disable();
20537diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
20538index 4ff0ab9..2ff68d3 100644
20539--- a/arch/x86/kvm/vmx.c
20540+++ b/arch/x86/kvm/vmx.c
20541@@ -1303,7 +1303,11 @@ static void reload_tss(void)
20542 struct desc_struct *descs;
20543
20544 descs = (void *)gdt->address;
20545+
20546+ pax_open_kernel();
20547 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
20548+ pax_close_kernel();
20549+
20550 load_TR_desc();
20551 }
20552
20553@@ -2625,8 +2629,11 @@ static __init int hardware_setup(void)
20554 if (!cpu_has_vmx_flexpriority())
20555 flexpriority_enabled = 0;
20556
20557- if (!cpu_has_vmx_tpr_shadow())
20558- kvm_x86_ops->update_cr8_intercept = NULL;
20559+ if (!cpu_has_vmx_tpr_shadow()) {
20560+ pax_open_kernel();
20561+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
20562+ pax_close_kernel();
20563+ }
20564
20565 if (enable_ept && !cpu_has_vmx_ept_2m_page())
20566 kvm_disable_largepages();
20567@@ -3642,7 +3649,7 @@ static void vmx_set_constant_host_state(void)
20568 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
20569
20570 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
20571- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
20572+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
20573
20574 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
20575 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
20576@@ -6180,6 +6187,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20577 "jmp .Lkvm_vmx_return \n\t"
20578 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
20579 ".Lkvm_vmx_return: "
20580+
20581+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20582+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
20583+ ".Lkvm_vmx_return2: "
20584+#endif
20585+
20586 /* Save guest registers, load host registers, keep flags */
20587 "mov %0, %c[wordsize](%%"R"sp) \n\t"
20588 "pop %0 \n\t"
20589@@ -6228,6 +6241,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20590 #endif
20591 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
20592 [wordsize]"i"(sizeof(ulong))
20593+
20594+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20595+ ,[cs]"i"(__KERNEL_CS)
20596+#endif
20597+
20598 : "cc", "memory"
20599 , R"ax", R"bx", R"di", R"si"
20600 #ifdef CONFIG_X86_64
20601@@ -6256,7 +6274,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20602 }
20603 }
20604
20605- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
20606+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
20607+
20608+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20609+ loadsegment(fs, __KERNEL_PERCPU);
20610+#endif
20611+
20612+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20613+ __set_fs(current_thread_info()->addr_limit);
20614+#endif
20615+
20616 vmx->loaded_vmcs->launched = 1;
20617
20618 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
20619diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
20620index 185a2b8..866d2a6 100644
20621--- a/arch/x86/kvm/x86.c
20622+++ b/arch/x86/kvm/x86.c
20623@@ -1357,8 +1357,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
20624 {
20625 struct kvm *kvm = vcpu->kvm;
20626 int lm = is_long_mode(vcpu);
20627- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20628- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20629+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20630+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20631 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
20632 : kvm->arch.xen_hvm_config.blob_size_32;
20633 u32 page_num = data & ~PAGE_MASK;
20634@@ -2213,6 +2213,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
20635 if (n < msr_list.nmsrs)
20636 goto out;
20637 r = -EFAULT;
20638+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
20639+ goto out;
20640 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
20641 num_msrs_to_save * sizeof(u32)))
20642 goto out;
20643@@ -2338,7 +2340,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
20644 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
20645 struct kvm_interrupt *irq)
20646 {
20647- if (irq->irq < 0 || irq->irq >= 256)
20648+ if (irq->irq >= 256)
20649 return -EINVAL;
20650 if (irqchip_in_kernel(vcpu->kvm))
20651 return -ENXIO;
20652@@ -4860,7 +4862,7 @@ static void kvm_set_mmio_spte_mask(void)
20653 kvm_mmu_set_mmio_spte_mask(mask);
20654 }
20655
20656-int kvm_arch_init(void *opaque)
20657+int kvm_arch_init(const void *opaque)
20658 {
20659 int r;
20660 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
20661diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
20662index 642d880..44e0f3f 100644
20663--- a/arch/x86/lguest/boot.c
20664+++ b/arch/x86/lguest/boot.c
20665@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
20666 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
20667 * Launcher to reboot us.
20668 */
20669-static void lguest_restart(char *reason)
20670+static __noreturn void lguest_restart(char *reason)
20671 {
20672 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
20673+ BUG();
20674 }
20675
20676 /*G:050
20677diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
20678index 00933d5..3a64af9 100644
20679--- a/arch/x86/lib/atomic64_386_32.S
20680+++ b/arch/x86/lib/atomic64_386_32.S
20681@@ -48,6 +48,10 @@ BEGIN(read)
20682 movl (v), %eax
20683 movl 4(v), %edx
20684 RET_ENDP
20685+BEGIN(read_unchecked)
20686+ movl (v), %eax
20687+ movl 4(v), %edx
20688+RET_ENDP
20689 #undef v
20690
20691 #define v %esi
20692@@ -55,6 +59,10 @@ BEGIN(set)
20693 movl %ebx, (v)
20694 movl %ecx, 4(v)
20695 RET_ENDP
20696+BEGIN(set_unchecked)
20697+ movl %ebx, (v)
20698+ movl %ecx, 4(v)
20699+RET_ENDP
20700 #undef v
20701
20702 #define v %esi
20703@@ -70,6 +78,20 @@ RET_ENDP
20704 BEGIN(add)
20705 addl %eax, (v)
20706 adcl %edx, 4(v)
20707+
20708+#ifdef CONFIG_PAX_REFCOUNT
20709+ jno 0f
20710+ subl %eax, (v)
20711+ sbbl %edx, 4(v)
20712+ int $4
20713+0:
20714+ _ASM_EXTABLE(0b, 0b)
20715+#endif
20716+
20717+RET_ENDP
20718+BEGIN(add_unchecked)
20719+ addl %eax, (v)
20720+ adcl %edx, 4(v)
20721 RET_ENDP
20722 #undef v
20723
20724@@ -77,6 +99,24 @@ RET_ENDP
20725 BEGIN(add_return)
20726 addl (v), %eax
20727 adcl 4(v), %edx
20728+
20729+#ifdef CONFIG_PAX_REFCOUNT
20730+ into
20731+1234:
20732+ _ASM_EXTABLE(1234b, 2f)
20733+#endif
20734+
20735+ movl %eax, (v)
20736+ movl %edx, 4(v)
20737+
20738+#ifdef CONFIG_PAX_REFCOUNT
20739+2:
20740+#endif
20741+
20742+RET_ENDP
20743+BEGIN(add_return_unchecked)
20744+ addl (v), %eax
20745+ adcl 4(v), %edx
20746 movl %eax, (v)
20747 movl %edx, 4(v)
20748 RET_ENDP
20749@@ -86,6 +126,20 @@ RET_ENDP
20750 BEGIN(sub)
20751 subl %eax, (v)
20752 sbbl %edx, 4(v)
20753+
20754+#ifdef CONFIG_PAX_REFCOUNT
20755+ jno 0f
20756+ addl %eax, (v)
20757+ adcl %edx, 4(v)
20758+ int $4
20759+0:
20760+ _ASM_EXTABLE(0b, 0b)
20761+#endif
20762+
20763+RET_ENDP
20764+BEGIN(sub_unchecked)
20765+ subl %eax, (v)
20766+ sbbl %edx, 4(v)
20767 RET_ENDP
20768 #undef v
20769
20770@@ -96,6 +150,27 @@ BEGIN(sub_return)
20771 sbbl $0, %edx
20772 addl (v), %eax
20773 adcl 4(v), %edx
20774+
20775+#ifdef CONFIG_PAX_REFCOUNT
20776+ into
20777+1234:
20778+ _ASM_EXTABLE(1234b, 2f)
20779+#endif
20780+
20781+ movl %eax, (v)
20782+ movl %edx, 4(v)
20783+
20784+#ifdef CONFIG_PAX_REFCOUNT
20785+2:
20786+#endif
20787+
20788+RET_ENDP
20789+BEGIN(sub_return_unchecked)
20790+ negl %edx
20791+ negl %eax
20792+ sbbl $0, %edx
20793+ addl (v), %eax
20794+ adcl 4(v), %edx
20795 movl %eax, (v)
20796 movl %edx, 4(v)
20797 RET_ENDP
20798@@ -105,6 +180,20 @@ RET_ENDP
20799 BEGIN(inc)
20800 addl $1, (v)
20801 adcl $0, 4(v)
20802+
20803+#ifdef CONFIG_PAX_REFCOUNT
20804+ jno 0f
20805+ subl $1, (v)
20806+ sbbl $0, 4(v)
20807+ int $4
20808+0:
20809+ _ASM_EXTABLE(0b, 0b)
20810+#endif
20811+
20812+RET_ENDP
20813+BEGIN(inc_unchecked)
20814+ addl $1, (v)
20815+ adcl $0, 4(v)
20816 RET_ENDP
20817 #undef v
20818
20819@@ -114,6 +203,26 @@ BEGIN(inc_return)
20820 movl 4(v), %edx
20821 addl $1, %eax
20822 adcl $0, %edx
20823+
20824+#ifdef CONFIG_PAX_REFCOUNT
20825+ into
20826+1234:
20827+ _ASM_EXTABLE(1234b, 2f)
20828+#endif
20829+
20830+ movl %eax, (v)
20831+ movl %edx, 4(v)
20832+
20833+#ifdef CONFIG_PAX_REFCOUNT
20834+2:
20835+#endif
20836+
20837+RET_ENDP
20838+BEGIN(inc_return_unchecked)
20839+ movl (v), %eax
20840+ movl 4(v), %edx
20841+ addl $1, %eax
20842+ adcl $0, %edx
20843 movl %eax, (v)
20844 movl %edx, 4(v)
20845 RET_ENDP
20846@@ -123,6 +232,20 @@ RET_ENDP
20847 BEGIN(dec)
20848 subl $1, (v)
20849 sbbl $0, 4(v)
20850+
20851+#ifdef CONFIG_PAX_REFCOUNT
20852+ jno 0f
20853+ addl $1, (v)
20854+ adcl $0, 4(v)
20855+ int $4
20856+0:
20857+ _ASM_EXTABLE(0b, 0b)
20858+#endif
20859+
20860+RET_ENDP
20861+BEGIN(dec_unchecked)
20862+ subl $1, (v)
20863+ sbbl $0, 4(v)
20864 RET_ENDP
20865 #undef v
20866
20867@@ -132,6 +255,26 @@ BEGIN(dec_return)
20868 movl 4(v), %edx
20869 subl $1, %eax
20870 sbbl $0, %edx
20871+
20872+#ifdef CONFIG_PAX_REFCOUNT
20873+ into
20874+1234:
20875+ _ASM_EXTABLE(1234b, 2f)
20876+#endif
20877+
20878+ movl %eax, (v)
20879+ movl %edx, 4(v)
20880+
20881+#ifdef CONFIG_PAX_REFCOUNT
20882+2:
20883+#endif
20884+
20885+RET_ENDP
20886+BEGIN(dec_return_unchecked)
20887+ movl (v), %eax
20888+ movl 4(v), %edx
20889+ subl $1, %eax
20890+ sbbl $0, %edx
20891 movl %eax, (v)
20892 movl %edx, 4(v)
20893 RET_ENDP
20894@@ -143,6 +286,13 @@ BEGIN(add_unless)
20895 adcl %edx, %edi
20896 addl (v), %eax
20897 adcl 4(v), %edx
20898+
20899+#ifdef CONFIG_PAX_REFCOUNT
20900+ into
20901+1234:
20902+ _ASM_EXTABLE(1234b, 2f)
20903+#endif
20904+
20905 cmpl %eax, %ecx
20906 je 3f
20907 1:
20908@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
20909 1:
20910 addl $1, %eax
20911 adcl $0, %edx
20912+
20913+#ifdef CONFIG_PAX_REFCOUNT
20914+ into
20915+1234:
20916+ _ASM_EXTABLE(1234b, 2f)
20917+#endif
20918+
20919 movl %eax, (v)
20920 movl %edx, 4(v)
20921 movl $1, %eax
20922@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
20923 movl 4(v), %edx
20924 subl $1, %eax
20925 sbbl $0, %edx
20926+
20927+#ifdef CONFIG_PAX_REFCOUNT
20928+ into
20929+1234:
20930+ _ASM_EXTABLE(1234b, 1f)
20931+#endif
20932+
20933 js 1f
20934 movl %eax, (v)
20935 movl %edx, 4(v)
20936diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
20937index f5cc9eb..51fa319 100644
20938--- a/arch/x86/lib/atomic64_cx8_32.S
20939+++ b/arch/x86/lib/atomic64_cx8_32.S
20940@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
20941 CFI_STARTPROC
20942
20943 read64 %ecx
20944+ pax_force_retaddr
20945 ret
20946 CFI_ENDPROC
20947 ENDPROC(atomic64_read_cx8)
20948
20949+ENTRY(atomic64_read_unchecked_cx8)
20950+ CFI_STARTPROC
20951+
20952+ read64 %ecx
20953+ pax_force_retaddr
20954+ ret
20955+ CFI_ENDPROC
20956+ENDPROC(atomic64_read_unchecked_cx8)
20957+
20958 ENTRY(atomic64_set_cx8)
20959 CFI_STARTPROC
20960
20961@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
20962 cmpxchg8b (%esi)
20963 jne 1b
20964
20965+ pax_force_retaddr
20966 ret
20967 CFI_ENDPROC
20968 ENDPROC(atomic64_set_cx8)
20969
20970+ENTRY(atomic64_set_unchecked_cx8)
20971+ CFI_STARTPROC
20972+
20973+1:
20974+/* we don't need LOCK_PREFIX since aligned 64-bit writes
20975+ * are atomic on 586 and newer */
20976+ cmpxchg8b (%esi)
20977+ jne 1b
20978+
20979+ pax_force_retaddr
20980+ ret
20981+ CFI_ENDPROC
20982+ENDPROC(atomic64_set_unchecked_cx8)
20983+
20984 ENTRY(atomic64_xchg_cx8)
20985 CFI_STARTPROC
20986
20987@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
20988 cmpxchg8b (%esi)
20989 jne 1b
20990
20991+ pax_force_retaddr
20992 ret
20993 CFI_ENDPROC
20994 ENDPROC(atomic64_xchg_cx8)
20995
20996-.macro addsub_return func ins insc
20997-ENTRY(atomic64_\func\()_return_cx8)
20998+.macro addsub_return func ins insc unchecked=""
20999+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
21000 CFI_STARTPROC
21001 SAVE ebp
21002 SAVE ebx
21003@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
21004 movl %edx, %ecx
21005 \ins\()l %esi, %ebx
21006 \insc\()l %edi, %ecx
21007+
21008+.ifb \unchecked
21009+#ifdef CONFIG_PAX_REFCOUNT
21010+ into
21011+2:
21012+ _ASM_EXTABLE(2b, 3f)
21013+#endif
21014+.endif
21015+
21016 LOCK_PREFIX
21017 cmpxchg8b (%ebp)
21018 jne 1b
21019-
21020-10:
21021 movl %ebx, %eax
21022 movl %ecx, %edx
21023+
21024+.ifb \unchecked
21025+#ifdef CONFIG_PAX_REFCOUNT
21026+3:
21027+#endif
21028+.endif
21029+
21030 RESTORE edi
21031 RESTORE esi
21032 RESTORE ebx
21033 RESTORE ebp
21034+ pax_force_retaddr
21035 ret
21036 CFI_ENDPROC
21037-ENDPROC(atomic64_\func\()_return_cx8)
21038+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21039 .endm
21040
21041 addsub_return add add adc
21042 addsub_return sub sub sbb
21043+addsub_return add add adc _unchecked
21044+addsub_return sub sub sbb _unchecked
21045
21046-.macro incdec_return func ins insc
21047-ENTRY(atomic64_\func\()_return_cx8)
21048+.macro incdec_return func ins insc unchecked=""
21049+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
21050 CFI_STARTPROC
21051 SAVE ebx
21052
21053@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
21054 movl %edx, %ecx
21055 \ins\()l $1, %ebx
21056 \insc\()l $0, %ecx
21057+
21058+.ifb \unchecked
21059+#ifdef CONFIG_PAX_REFCOUNT
21060+ into
21061+2:
21062+ _ASM_EXTABLE(2b, 3f)
21063+#endif
21064+.endif
21065+
21066 LOCK_PREFIX
21067 cmpxchg8b (%esi)
21068 jne 1b
21069
21070-10:
21071 movl %ebx, %eax
21072 movl %ecx, %edx
21073+
21074+.ifb \unchecked
21075+#ifdef CONFIG_PAX_REFCOUNT
21076+3:
21077+#endif
21078+.endif
21079+
21080 RESTORE ebx
21081+ pax_force_retaddr
21082 ret
21083 CFI_ENDPROC
21084-ENDPROC(atomic64_\func\()_return_cx8)
21085+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21086 .endm
21087
21088 incdec_return inc add adc
21089 incdec_return dec sub sbb
21090+incdec_return inc add adc _unchecked
21091+incdec_return dec sub sbb _unchecked
21092
21093 ENTRY(atomic64_dec_if_positive_cx8)
21094 CFI_STARTPROC
21095@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
21096 movl %edx, %ecx
21097 subl $1, %ebx
21098 sbb $0, %ecx
21099+
21100+#ifdef CONFIG_PAX_REFCOUNT
21101+ into
21102+1234:
21103+ _ASM_EXTABLE(1234b, 2f)
21104+#endif
21105+
21106 js 2f
21107 LOCK_PREFIX
21108 cmpxchg8b (%esi)
21109@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
21110 movl %ebx, %eax
21111 movl %ecx, %edx
21112 RESTORE ebx
21113+ pax_force_retaddr
21114 ret
21115 CFI_ENDPROC
21116 ENDPROC(atomic64_dec_if_positive_cx8)
21117@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
21118 movl %edx, %ecx
21119 addl %ebp, %ebx
21120 adcl %edi, %ecx
21121+
21122+#ifdef CONFIG_PAX_REFCOUNT
21123+ into
21124+1234:
21125+ _ASM_EXTABLE(1234b, 3f)
21126+#endif
21127+
21128 LOCK_PREFIX
21129 cmpxchg8b (%esi)
21130 jne 1b
21131@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
21132 CFI_ADJUST_CFA_OFFSET -8
21133 RESTORE ebx
21134 RESTORE ebp
21135+ pax_force_retaddr
21136 ret
21137 4:
21138 cmpl %edx, 4(%esp)
21139@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
21140 xorl %ecx, %ecx
21141 addl $1, %ebx
21142 adcl %edx, %ecx
21143+
21144+#ifdef CONFIG_PAX_REFCOUNT
21145+ into
21146+1234:
21147+ _ASM_EXTABLE(1234b, 3f)
21148+#endif
21149+
21150 LOCK_PREFIX
21151 cmpxchg8b (%esi)
21152 jne 1b
21153@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
21154 movl $1, %eax
21155 3:
21156 RESTORE ebx
21157+ pax_force_retaddr
21158 ret
21159 CFI_ENDPROC
21160 ENDPROC(atomic64_inc_not_zero_cx8)
21161diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21162index 78d16a5..fbcf666 100644
21163--- a/arch/x86/lib/checksum_32.S
21164+++ b/arch/x86/lib/checksum_32.S
21165@@ -28,7 +28,8 @@
21166 #include <linux/linkage.h>
21167 #include <asm/dwarf2.h>
21168 #include <asm/errno.h>
21169-
21170+#include <asm/segment.h>
21171+
21172 /*
21173 * computes a partial checksum, e.g. for TCP/UDP fragments
21174 */
21175@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21176
21177 #define ARGBASE 16
21178 #define FP 12
21179-
21180-ENTRY(csum_partial_copy_generic)
21181+
21182+ENTRY(csum_partial_copy_generic_to_user)
21183 CFI_STARTPROC
21184+
21185+#ifdef CONFIG_PAX_MEMORY_UDEREF
21186+ pushl_cfi %gs
21187+ popl_cfi %es
21188+ jmp csum_partial_copy_generic
21189+#endif
21190+
21191+ENTRY(csum_partial_copy_generic_from_user)
21192+
21193+#ifdef CONFIG_PAX_MEMORY_UDEREF
21194+ pushl_cfi %gs
21195+ popl_cfi %ds
21196+#endif
21197+
21198+ENTRY(csum_partial_copy_generic)
21199 subl $4,%esp
21200 CFI_ADJUST_CFA_OFFSET 4
21201 pushl_cfi %edi
21202@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
21203 jmp 4f
21204 SRC(1: movw (%esi), %bx )
21205 addl $2, %esi
21206-DST( movw %bx, (%edi) )
21207+DST( movw %bx, %es:(%edi) )
21208 addl $2, %edi
21209 addw %bx, %ax
21210 adcl $0, %eax
21211@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
21212 SRC(1: movl (%esi), %ebx )
21213 SRC( movl 4(%esi), %edx )
21214 adcl %ebx, %eax
21215-DST( movl %ebx, (%edi) )
21216+DST( movl %ebx, %es:(%edi) )
21217 adcl %edx, %eax
21218-DST( movl %edx, 4(%edi) )
21219+DST( movl %edx, %es:4(%edi) )
21220
21221 SRC( movl 8(%esi), %ebx )
21222 SRC( movl 12(%esi), %edx )
21223 adcl %ebx, %eax
21224-DST( movl %ebx, 8(%edi) )
21225+DST( movl %ebx, %es:8(%edi) )
21226 adcl %edx, %eax
21227-DST( movl %edx, 12(%edi) )
21228+DST( movl %edx, %es:12(%edi) )
21229
21230 SRC( movl 16(%esi), %ebx )
21231 SRC( movl 20(%esi), %edx )
21232 adcl %ebx, %eax
21233-DST( movl %ebx, 16(%edi) )
21234+DST( movl %ebx, %es:16(%edi) )
21235 adcl %edx, %eax
21236-DST( movl %edx, 20(%edi) )
21237+DST( movl %edx, %es:20(%edi) )
21238
21239 SRC( movl 24(%esi), %ebx )
21240 SRC( movl 28(%esi), %edx )
21241 adcl %ebx, %eax
21242-DST( movl %ebx, 24(%edi) )
21243+DST( movl %ebx, %es:24(%edi) )
21244 adcl %edx, %eax
21245-DST( movl %edx, 28(%edi) )
21246+DST( movl %edx, %es:28(%edi) )
21247
21248 lea 32(%esi), %esi
21249 lea 32(%edi), %edi
21250@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
21251 shrl $2, %edx # This clears CF
21252 SRC(3: movl (%esi), %ebx )
21253 adcl %ebx, %eax
21254-DST( movl %ebx, (%edi) )
21255+DST( movl %ebx, %es:(%edi) )
21256 lea 4(%esi), %esi
21257 lea 4(%edi), %edi
21258 dec %edx
21259@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
21260 jb 5f
21261 SRC( movw (%esi), %cx )
21262 leal 2(%esi), %esi
21263-DST( movw %cx, (%edi) )
21264+DST( movw %cx, %es:(%edi) )
21265 leal 2(%edi), %edi
21266 je 6f
21267 shll $16,%ecx
21268 SRC(5: movb (%esi), %cl )
21269-DST( movb %cl, (%edi) )
21270+DST( movb %cl, %es:(%edi) )
21271 6: addl %ecx, %eax
21272 adcl $0, %eax
21273 7:
21274@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
21275
21276 6001:
21277 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21278- movl $-EFAULT, (%ebx)
21279+ movl $-EFAULT, %ss:(%ebx)
21280
21281 # zero the complete destination - computing the rest
21282 # is too much work
21283@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
21284
21285 6002:
21286 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21287- movl $-EFAULT,(%ebx)
21288+ movl $-EFAULT,%ss:(%ebx)
21289 jmp 5000b
21290
21291 .previous
21292
21293+ pushl_cfi %ss
21294+ popl_cfi %ds
21295+ pushl_cfi %ss
21296+ popl_cfi %es
21297 popl_cfi %ebx
21298 CFI_RESTORE ebx
21299 popl_cfi %esi
21300@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
21301 popl_cfi %ecx # equivalent to addl $4,%esp
21302 ret
21303 CFI_ENDPROC
21304-ENDPROC(csum_partial_copy_generic)
21305+ENDPROC(csum_partial_copy_generic_to_user)
21306
21307 #else
21308
21309 /* Version for PentiumII/PPro */
21310
21311 #define ROUND1(x) \
21312+ nop; nop; nop; \
21313 SRC(movl x(%esi), %ebx ) ; \
21314 addl %ebx, %eax ; \
21315- DST(movl %ebx, x(%edi) ) ;
21316+ DST(movl %ebx, %es:x(%edi)) ;
21317
21318 #define ROUND(x) \
21319+ nop; nop; nop; \
21320 SRC(movl x(%esi), %ebx ) ; \
21321 adcl %ebx, %eax ; \
21322- DST(movl %ebx, x(%edi) ) ;
21323+ DST(movl %ebx, %es:x(%edi)) ;
21324
21325 #define ARGBASE 12
21326-
21327-ENTRY(csum_partial_copy_generic)
21328+
21329+ENTRY(csum_partial_copy_generic_to_user)
21330 CFI_STARTPROC
21331+
21332+#ifdef CONFIG_PAX_MEMORY_UDEREF
21333+ pushl_cfi %gs
21334+ popl_cfi %es
21335+ jmp csum_partial_copy_generic
21336+#endif
21337+
21338+ENTRY(csum_partial_copy_generic_from_user)
21339+
21340+#ifdef CONFIG_PAX_MEMORY_UDEREF
21341+ pushl_cfi %gs
21342+ popl_cfi %ds
21343+#endif
21344+
21345+ENTRY(csum_partial_copy_generic)
21346 pushl_cfi %ebx
21347 CFI_REL_OFFSET ebx, 0
21348 pushl_cfi %edi
21349@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
21350 subl %ebx, %edi
21351 lea -1(%esi),%edx
21352 andl $-32,%edx
21353- lea 3f(%ebx,%ebx), %ebx
21354+ lea 3f(%ebx,%ebx,2), %ebx
21355 testl %esi, %esi
21356 jmp *%ebx
21357 1: addl $64,%esi
21358@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
21359 jb 5f
21360 SRC( movw (%esi), %dx )
21361 leal 2(%esi), %esi
21362-DST( movw %dx, (%edi) )
21363+DST( movw %dx, %es:(%edi) )
21364 leal 2(%edi), %edi
21365 je 6f
21366 shll $16,%edx
21367 5:
21368 SRC( movb (%esi), %dl )
21369-DST( movb %dl, (%edi) )
21370+DST( movb %dl, %es:(%edi) )
21371 6: addl %edx, %eax
21372 adcl $0, %eax
21373 7:
21374 .section .fixup, "ax"
21375 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21376- movl $-EFAULT, (%ebx)
21377+ movl $-EFAULT, %ss:(%ebx)
21378 # zero the complete destination (computing the rest is too much work)
21379 movl ARGBASE+8(%esp),%edi # dst
21380 movl ARGBASE+12(%esp),%ecx # len
21381@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
21382 rep; stosb
21383 jmp 7b
21384 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21385- movl $-EFAULT, (%ebx)
21386+ movl $-EFAULT, %ss:(%ebx)
21387 jmp 7b
21388 .previous
21389
21390+#ifdef CONFIG_PAX_MEMORY_UDEREF
21391+ pushl_cfi %ss
21392+ popl_cfi %ds
21393+ pushl_cfi %ss
21394+ popl_cfi %es
21395+#endif
21396+
21397 popl_cfi %esi
21398 CFI_RESTORE esi
21399 popl_cfi %edi
21400@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
21401 CFI_RESTORE ebx
21402 ret
21403 CFI_ENDPROC
21404-ENDPROC(csum_partial_copy_generic)
21405+ENDPROC(csum_partial_copy_generic_to_user)
21406
21407 #undef ROUND
21408 #undef ROUND1
21409diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21410index f2145cf..cea889d 100644
21411--- a/arch/x86/lib/clear_page_64.S
21412+++ b/arch/x86/lib/clear_page_64.S
21413@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
21414 movl $4096/8,%ecx
21415 xorl %eax,%eax
21416 rep stosq
21417+ pax_force_retaddr
21418 ret
21419 CFI_ENDPROC
21420 ENDPROC(clear_page_c)
21421@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
21422 movl $4096,%ecx
21423 xorl %eax,%eax
21424 rep stosb
21425+ pax_force_retaddr
21426 ret
21427 CFI_ENDPROC
21428 ENDPROC(clear_page_c_e)
21429@@ -43,6 +45,7 @@ ENTRY(clear_page)
21430 leaq 64(%rdi),%rdi
21431 jnz .Lloop
21432 nop
21433+ pax_force_retaddr
21434 ret
21435 CFI_ENDPROC
21436 .Lclear_page_end:
21437@@ -58,7 +61,7 @@ ENDPROC(clear_page)
21438
21439 #include <asm/cpufeature.h>
21440
21441- .section .altinstr_replacement,"ax"
21442+ .section .altinstr_replacement,"a"
21443 1: .byte 0xeb /* jmp <disp8> */
21444 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21445 2: .byte 0xeb /* jmp <disp8> */
21446diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
21447index 1e572c5..2a162cd 100644
21448--- a/arch/x86/lib/cmpxchg16b_emu.S
21449+++ b/arch/x86/lib/cmpxchg16b_emu.S
21450@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
21451
21452 popf
21453 mov $1, %al
21454+ pax_force_retaddr
21455 ret
21456
21457 not_same:
21458 popf
21459 xor %al,%al
21460+ pax_force_retaddr
21461 ret
21462
21463 CFI_ENDPROC
21464diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21465index 6b34d04..dccb07f 100644
21466--- a/arch/x86/lib/copy_page_64.S
21467+++ b/arch/x86/lib/copy_page_64.S
21468@@ -9,6 +9,7 @@ copy_page_c:
21469 CFI_STARTPROC
21470 movl $4096/8,%ecx
21471 rep movsq
21472+ pax_force_retaddr
21473 ret
21474 CFI_ENDPROC
21475 ENDPROC(copy_page_c)
21476@@ -20,12 +21,14 @@ ENDPROC(copy_page_c)
21477
21478 ENTRY(copy_page)
21479 CFI_STARTPROC
21480- subq $2*8,%rsp
21481- CFI_ADJUST_CFA_OFFSET 2*8
21482+ subq $3*8,%rsp
21483+ CFI_ADJUST_CFA_OFFSET 3*8
21484 movq %rbx,(%rsp)
21485 CFI_REL_OFFSET rbx, 0
21486 movq %r12,1*8(%rsp)
21487 CFI_REL_OFFSET r12, 1*8
21488+ movq %r13,2*8(%rsp)
21489+ CFI_REL_OFFSET r13, 2*8
21490
21491 movl $(4096/64)-5,%ecx
21492 .p2align 4
21493@@ -37,7 +40,7 @@ ENTRY(copy_page)
21494 movq 16 (%rsi), %rdx
21495 movq 24 (%rsi), %r8
21496 movq 32 (%rsi), %r9
21497- movq 40 (%rsi), %r10
21498+ movq 40 (%rsi), %r13
21499 movq 48 (%rsi), %r11
21500 movq 56 (%rsi), %r12
21501
21502@@ -48,7 +51,7 @@ ENTRY(copy_page)
21503 movq %rdx, 16 (%rdi)
21504 movq %r8, 24 (%rdi)
21505 movq %r9, 32 (%rdi)
21506- movq %r10, 40 (%rdi)
21507+ movq %r13, 40 (%rdi)
21508 movq %r11, 48 (%rdi)
21509 movq %r12, 56 (%rdi)
21510
21511@@ -67,7 +70,7 @@ ENTRY(copy_page)
21512 movq 16 (%rsi), %rdx
21513 movq 24 (%rsi), %r8
21514 movq 32 (%rsi), %r9
21515- movq 40 (%rsi), %r10
21516+ movq 40 (%rsi), %r13
21517 movq 48 (%rsi), %r11
21518 movq 56 (%rsi), %r12
21519
21520@@ -76,7 +79,7 @@ ENTRY(copy_page)
21521 movq %rdx, 16 (%rdi)
21522 movq %r8, 24 (%rdi)
21523 movq %r9, 32 (%rdi)
21524- movq %r10, 40 (%rdi)
21525+ movq %r13, 40 (%rdi)
21526 movq %r11, 48 (%rdi)
21527 movq %r12, 56 (%rdi)
21528
21529@@ -89,8 +92,11 @@ ENTRY(copy_page)
21530 CFI_RESTORE rbx
21531 movq 1*8(%rsp),%r12
21532 CFI_RESTORE r12
21533- addq $2*8,%rsp
21534- CFI_ADJUST_CFA_OFFSET -2*8
21535+ movq 2*8(%rsp),%r13
21536+ CFI_RESTORE r13
21537+ addq $3*8,%rsp
21538+ CFI_ADJUST_CFA_OFFSET -3*8
21539+ pax_force_retaddr
21540 ret
21541 .Lcopy_page_end:
21542 CFI_ENDPROC
21543@@ -101,7 +107,7 @@ ENDPROC(copy_page)
21544
21545 #include <asm/cpufeature.h>
21546
21547- .section .altinstr_replacement,"ax"
21548+ .section .altinstr_replacement,"a"
21549 1: .byte 0xeb /* jmp <disp8> */
21550 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21551 2:
21552diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21553index 0248402..821c786 100644
21554--- a/arch/x86/lib/copy_user_64.S
21555+++ b/arch/x86/lib/copy_user_64.S
21556@@ -16,6 +16,7 @@
21557 #include <asm/thread_info.h>
21558 #include <asm/cpufeature.h>
21559 #include <asm/alternative-asm.h>
21560+#include <asm/pgtable.h>
21561
21562 /*
21563 * By placing feature2 after feature1 in altinstructions section, we logically
21564@@ -29,7 +30,7 @@
21565 .byte 0xe9 /* 32bit jump */
21566 .long \orig-1f /* by default jump to orig */
21567 1:
21568- .section .altinstr_replacement,"ax"
21569+ .section .altinstr_replacement,"a"
21570 2: .byte 0xe9 /* near jump with 32bit immediate */
21571 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
21572 3: .byte 0xe9 /* near jump with 32bit immediate */
21573@@ -71,47 +72,20 @@
21574 #endif
21575 .endm
21576
21577-/* Standard copy_to_user with segment limit checking */
21578-ENTRY(_copy_to_user)
21579- CFI_STARTPROC
21580- GET_THREAD_INFO(%rax)
21581- movq %rdi,%rcx
21582- addq %rdx,%rcx
21583- jc bad_to_user
21584- cmpq TI_addr_limit(%rax),%rcx
21585- ja bad_to_user
21586- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21587- copy_user_generic_unrolled,copy_user_generic_string, \
21588- copy_user_enhanced_fast_string
21589- CFI_ENDPROC
21590-ENDPROC(_copy_to_user)
21591-
21592-/* Standard copy_from_user with segment limit checking */
21593-ENTRY(_copy_from_user)
21594- CFI_STARTPROC
21595- GET_THREAD_INFO(%rax)
21596- movq %rsi,%rcx
21597- addq %rdx,%rcx
21598- jc bad_from_user
21599- cmpq TI_addr_limit(%rax),%rcx
21600- ja bad_from_user
21601- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21602- copy_user_generic_unrolled,copy_user_generic_string, \
21603- copy_user_enhanced_fast_string
21604- CFI_ENDPROC
21605-ENDPROC(_copy_from_user)
21606-
21607 .section .fixup,"ax"
21608 /* must zero dest */
21609 ENTRY(bad_from_user)
21610 bad_from_user:
21611 CFI_STARTPROC
21612+ testl %edx,%edx
21613+ js bad_to_user
21614 movl %edx,%ecx
21615 xorl %eax,%eax
21616 rep
21617 stosb
21618 bad_to_user:
21619 movl %edx,%eax
21620+ pax_force_retaddr
21621 ret
21622 CFI_ENDPROC
21623 ENDPROC(bad_from_user)
21624@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
21625 jz 17f
21626 1: movq (%rsi),%r8
21627 2: movq 1*8(%rsi),%r9
21628-3: movq 2*8(%rsi),%r10
21629+3: movq 2*8(%rsi),%rax
21630 4: movq 3*8(%rsi),%r11
21631 5: movq %r8,(%rdi)
21632 6: movq %r9,1*8(%rdi)
21633-7: movq %r10,2*8(%rdi)
21634+7: movq %rax,2*8(%rdi)
21635 8: movq %r11,3*8(%rdi)
21636 9: movq 4*8(%rsi),%r8
21637 10: movq 5*8(%rsi),%r9
21638-11: movq 6*8(%rsi),%r10
21639+11: movq 6*8(%rsi),%rax
21640 12: movq 7*8(%rsi),%r11
21641 13: movq %r8,4*8(%rdi)
21642 14: movq %r9,5*8(%rdi)
21643-15: movq %r10,6*8(%rdi)
21644+15: movq %rax,6*8(%rdi)
21645 16: movq %r11,7*8(%rdi)
21646 leaq 64(%rsi),%rsi
21647 leaq 64(%rdi),%rdi
21648@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
21649 decl %ecx
21650 jnz 21b
21651 23: xor %eax,%eax
21652+ pax_force_retaddr
21653 ret
21654
21655 .section .fixup,"ax"
21656@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
21657 3: rep
21658 movsb
21659 4: xorl %eax,%eax
21660+ pax_force_retaddr
21661 ret
21662
21663 .section .fixup,"ax"
21664@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
21665 1: rep
21666 movsb
21667 2: xorl %eax,%eax
21668+ pax_force_retaddr
21669 ret
21670
21671 .section .fixup,"ax"
21672diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
21673index cb0c112..e3a6895 100644
21674--- a/arch/x86/lib/copy_user_nocache_64.S
21675+++ b/arch/x86/lib/copy_user_nocache_64.S
21676@@ -8,12 +8,14 @@
21677
21678 #include <linux/linkage.h>
21679 #include <asm/dwarf2.h>
21680+#include <asm/alternative-asm.h>
21681
21682 #define FIX_ALIGNMENT 1
21683
21684 #include <asm/current.h>
21685 #include <asm/asm-offsets.h>
21686 #include <asm/thread_info.h>
21687+#include <asm/pgtable.h>
21688
21689 .macro ALIGN_DESTINATION
21690 #ifdef FIX_ALIGNMENT
21691@@ -50,6 +52,15 @@
21692 */
21693 ENTRY(__copy_user_nocache)
21694 CFI_STARTPROC
21695+
21696+#ifdef CONFIG_PAX_MEMORY_UDEREF
21697+ mov $PAX_USER_SHADOW_BASE,%rcx
21698+ cmp %rcx,%rsi
21699+ jae 1f
21700+ add %rcx,%rsi
21701+1:
21702+#endif
21703+
21704 cmpl $8,%edx
21705 jb 20f /* less then 8 bytes, go to byte copy loop */
21706 ALIGN_DESTINATION
21707@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
21708 jz 17f
21709 1: movq (%rsi),%r8
21710 2: movq 1*8(%rsi),%r9
21711-3: movq 2*8(%rsi),%r10
21712+3: movq 2*8(%rsi),%rax
21713 4: movq 3*8(%rsi),%r11
21714 5: movnti %r8,(%rdi)
21715 6: movnti %r9,1*8(%rdi)
21716-7: movnti %r10,2*8(%rdi)
21717+7: movnti %rax,2*8(%rdi)
21718 8: movnti %r11,3*8(%rdi)
21719 9: movq 4*8(%rsi),%r8
21720 10: movq 5*8(%rsi),%r9
21721-11: movq 6*8(%rsi),%r10
21722+11: movq 6*8(%rsi),%rax
21723 12: movq 7*8(%rsi),%r11
21724 13: movnti %r8,4*8(%rdi)
21725 14: movnti %r9,5*8(%rdi)
21726-15: movnti %r10,6*8(%rdi)
21727+15: movnti %rax,6*8(%rdi)
21728 16: movnti %r11,7*8(%rdi)
21729 leaq 64(%rsi),%rsi
21730 leaq 64(%rdi),%rdi
21731@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
21732 jnz 21b
21733 23: xorl %eax,%eax
21734 sfence
21735+ pax_force_retaddr
21736 ret
21737
21738 .section .fixup,"ax"
21739diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
21740index fb903b7..c92b7f7 100644
21741--- a/arch/x86/lib/csum-copy_64.S
21742+++ b/arch/x86/lib/csum-copy_64.S
21743@@ -8,6 +8,7 @@
21744 #include <linux/linkage.h>
21745 #include <asm/dwarf2.h>
21746 #include <asm/errno.h>
21747+#include <asm/alternative-asm.h>
21748
21749 /*
21750 * Checksum copy with exception handling.
21751@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
21752 CFI_RESTORE rbp
21753 addq $7*8, %rsp
21754 CFI_ADJUST_CFA_OFFSET -7*8
21755+ pax_force_retaddr 0, 1
21756 ret
21757 CFI_RESTORE_STATE
21758
21759diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
21760index 459b58a..9570bc7 100644
21761--- a/arch/x86/lib/csum-wrappers_64.c
21762+++ b/arch/x86/lib/csum-wrappers_64.c
21763@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
21764 len -= 2;
21765 }
21766 }
21767- isum = csum_partial_copy_generic((__force const void *)src,
21768+
21769+#ifdef CONFIG_PAX_MEMORY_UDEREF
21770+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21771+ src += PAX_USER_SHADOW_BASE;
21772+#endif
21773+
21774+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
21775 dst, len, isum, errp, NULL);
21776 if (unlikely(*errp))
21777 goto out_err;
21778@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
21779 }
21780
21781 *errp = 0;
21782- return csum_partial_copy_generic(src, (void __force *)dst,
21783+
21784+#ifdef CONFIG_PAX_MEMORY_UDEREF
21785+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
21786+ dst += PAX_USER_SHADOW_BASE;
21787+#endif
21788+
21789+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
21790 len, isum, NULL, errp);
21791 }
21792 EXPORT_SYMBOL(csum_partial_copy_to_user);
21793diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
21794index 51f1504..ddac4c1 100644
21795--- a/arch/x86/lib/getuser.S
21796+++ b/arch/x86/lib/getuser.S
21797@@ -33,15 +33,38 @@
21798 #include <asm/asm-offsets.h>
21799 #include <asm/thread_info.h>
21800 #include <asm/asm.h>
21801+#include <asm/segment.h>
21802+#include <asm/pgtable.h>
21803+#include <asm/alternative-asm.h>
21804+
21805+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21806+#define __copyuser_seg gs;
21807+#else
21808+#define __copyuser_seg
21809+#endif
21810
21811 .text
21812 ENTRY(__get_user_1)
21813 CFI_STARTPROC
21814+
21815+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21816 GET_THREAD_INFO(%_ASM_DX)
21817 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21818 jae bad_get_user
21819-1: movzb (%_ASM_AX),%edx
21820+
21821+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21822+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21823+ cmp %_ASM_DX,%_ASM_AX
21824+ jae 1234f
21825+ add %_ASM_DX,%_ASM_AX
21826+1234:
21827+#endif
21828+
21829+#endif
21830+
21831+1: __copyuser_seg movzb (%_ASM_AX),%edx
21832 xor %eax,%eax
21833+ pax_force_retaddr
21834 ret
21835 CFI_ENDPROC
21836 ENDPROC(__get_user_1)
21837@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
21838 ENTRY(__get_user_2)
21839 CFI_STARTPROC
21840 add $1,%_ASM_AX
21841+
21842+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21843 jc bad_get_user
21844 GET_THREAD_INFO(%_ASM_DX)
21845 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21846 jae bad_get_user
21847-2: movzwl -1(%_ASM_AX),%edx
21848+
21849+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21850+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21851+ cmp %_ASM_DX,%_ASM_AX
21852+ jae 1234f
21853+ add %_ASM_DX,%_ASM_AX
21854+1234:
21855+#endif
21856+
21857+#endif
21858+
21859+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
21860 xor %eax,%eax
21861+ pax_force_retaddr
21862 ret
21863 CFI_ENDPROC
21864 ENDPROC(__get_user_2)
21865@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
21866 ENTRY(__get_user_4)
21867 CFI_STARTPROC
21868 add $3,%_ASM_AX
21869+
21870+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21871 jc bad_get_user
21872 GET_THREAD_INFO(%_ASM_DX)
21873 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21874 jae bad_get_user
21875-3: mov -3(%_ASM_AX),%edx
21876+
21877+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21878+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21879+ cmp %_ASM_DX,%_ASM_AX
21880+ jae 1234f
21881+ add %_ASM_DX,%_ASM_AX
21882+1234:
21883+#endif
21884+
21885+#endif
21886+
21887+3: __copyuser_seg mov -3(%_ASM_AX),%edx
21888 xor %eax,%eax
21889+ pax_force_retaddr
21890 ret
21891 CFI_ENDPROC
21892 ENDPROC(__get_user_4)
21893@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
21894 GET_THREAD_INFO(%_ASM_DX)
21895 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21896 jae bad_get_user
21897+
21898+#ifdef CONFIG_PAX_MEMORY_UDEREF
21899+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21900+ cmp %_ASM_DX,%_ASM_AX
21901+ jae 1234f
21902+ add %_ASM_DX,%_ASM_AX
21903+1234:
21904+#endif
21905+
21906 4: movq -7(%_ASM_AX),%_ASM_DX
21907 xor %eax,%eax
21908+ pax_force_retaddr
21909 ret
21910 CFI_ENDPROC
21911 ENDPROC(__get_user_8)
21912@@ -91,6 +152,7 @@ bad_get_user:
21913 CFI_STARTPROC
21914 xor %edx,%edx
21915 mov $(-EFAULT),%_ASM_AX
21916+ pax_force_retaddr
21917 ret
21918 CFI_ENDPROC
21919 END(bad_get_user)
21920diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
21921index b1e6c4b..21ae8fc 100644
21922--- a/arch/x86/lib/insn.c
21923+++ b/arch/x86/lib/insn.c
21924@@ -21,6 +21,11 @@
21925 #include <linux/string.h>
21926 #include <asm/inat.h>
21927 #include <asm/insn.h>
21928+#ifdef __KERNEL__
21929+#include <asm/pgtable_types.h>
21930+#else
21931+#define ktla_ktva(addr) addr
21932+#endif
21933
21934 /* Verify next sizeof(t) bytes can be on the same instruction */
21935 #define validate_next(t, insn, n) \
21936@@ -49,8 +54,8 @@
21937 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
21938 {
21939 memset(insn, 0, sizeof(*insn));
21940- insn->kaddr = kaddr;
21941- insn->next_byte = kaddr;
21942+ insn->kaddr = ktla_ktva(kaddr);
21943+ insn->next_byte = ktla_ktva(kaddr);
21944 insn->x86_64 = x86_64 ? 1 : 0;
21945 insn->opnd_bytes = 4;
21946 if (x86_64)
21947diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
21948index 05a95e7..326f2fa 100644
21949--- a/arch/x86/lib/iomap_copy_64.S
21950+++ b/arch/x86/lib/iomap_copy_64.S
21951@@ -17,6 +17,7 @@
21952
21953 #include <linux/linkage.h>
21954 #include <asm/dwarf2.h>
21955+#include <asm/alternative-asm.h>
21956
21957 /*
21958 * override generic version in lib/iomap_copy.c
21959@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
21960 CFI_STARTPROC
21961 movl %edx,%ecx
21962 rep movsd
21963+ pax_force_retaddr
21964 ret
21965 CFI_ENDPROC
21966 ENDPROC(__iowrite32_copy)
21967diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
21968index 1c273be..da9cc0e 100644
21969--- a/arch/x86/lib/memcpy_64.S
21970+++ b/arch/x86/lib/memcpy_64.S
21971@@ -33,6 +33,7 @@
21972 rep movsq
21973 movl %edx, %ecx
21974 rep movsb
21975+ pax_force_retaddr
21976 ret
21977 .Lmemcpy_e:
21978 .previous
21979@@ -49,6 +50,7 @@
21980 movq %rdi, %rax
21981 movq %rdx, %rcx
21982 rep movsb
21983+ pax_force_retaddr
21984 ret
21985 .Lmemcpy_e_e:
21986 .previous
21987@@ -76,13 +78,13 @@ ENTRY(memcpy)
21988 */
21989 movq 0*8(%rsi), %r8
21990 movq 1*8(%rsi), %r9
21991- movq 2*8(%rsi), %r10
21992+ movq 2*8(%rsi), %rcx
21993 movq 3*8(%rsi), %r11
21994 leaq 4*8(%rsi), %rsi
21995
21996 movq %r8, 0*8(%rdi)
21997 movq %r9, 1*8(%rdi)
21998- movq %r10, 2*8(%rdi)
21999+ movq %rcx, 2*8(%rdi)
22000 movq %r11, 3*8(%rdi)
22001 leaq 4*8(%rdi), %rdi
22002 jae .Lcopy_forward_loop
22003@@ -105,12 +107,12 @@ ENTRY(memcpy)
22004 subq $0x20, %rdx
22005 movq -1*8(%rsi), %r8
22006 movq -2*8(%rsi), %r9
22007- movq -3*8(%rsi), %r10
22008+ movq -3*8(%rsi), %rcx
22009 movq -4*8(%rsi), %r11
22010 leaq -4*8(%rsi), %rsi
22011 movq %r8, -1*8(%rdi)
22012 movq %r9, -2*8(%rdi)
22013- movq %r10, -3*8(%rdi)
22014+ movq %rcx, -3*8(%rdi)
22015 movq %r11, -4*8(%rdi)
22016 leaq -4*8(%rdi), %rdi
22017 jae .Lcopy_backward_loop
22018@@ -130,12 +132,13 @@ ENTRY(memcpy)
22019 */
22020 movq 0*8(%rsi), %r8
22021 movq 1*8(%rsi), %r9
22022- movq -2*8(%rsi, %rdx), %r10
22023+ movq -2*8(%rsi, %rdx), %rcx
22024 movq -1*8(%rsi, %rdx), %r11
22025 movq %r8, 0*8(%rdi)
22026 movq %r9, 1*8(%rdi)
22027- movq %r10, -2*8(%rdi, %rdx)
22028+ movq %rcx, -2*8(%rdi, %rdx)
22029 movq %r11, -1*8(%rdi, %rdx)
22030+ pax_force_retaddr
22031 retq
22032 .p2align 4
22033 .Lless_16bytes:
22034@@ -148,6 +151,7 @@ ENTRY(memcpy)
22035 movq -1*8(%rsi, %rdx), %r9
22036 movq %r8, 0*8(%rdi)
22037 movq %r9, -1*8(%rdi, %rdx)
22038+ pax_force_retaddr
22039 retq
22040 .p2align 4
22041 .Lless_8bytes:
22042@@ -161,6 +165,7 @@ ENTRY(memcpy)
22043 movl -4(%rsi, %rdx), %r8d
22044 movl %ecx, (%rdi)
22045 movl %r8d, -4(%rdi, %rdx)
22046+ pax_force_retaddr
22047 retq
22048 .p2align 4
22049 .Lless_3bytes:
22050@@ -179,6 +184,7 @@ ENTRY(memcpy)
22051 movb %cl, (%rdi)
22052
22053 .Lend:
22054+ pax_force_retaddr
22055 retq
22056 CFI_ENDPROC
22057 ENDPROC(memcpy)
22058diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
22059index ee16461..c39c199 100644
22060--- a/arch/x86/lib/memmove_64.S
22061+++ b/arch/x86/lib/memmove_64.S
22062@@ -61,13 +61,13 @@ ENTRY(memmove)
22063 5:
22064 sub $0x20, %rdx
22065 movq 0*8(%rsi), %r11
22066- movq 1*8(%rsi), %r10
22067+ movq 1*8(%rsi), %rcx
22068 movq 2*8(%rsi), %r9
22069 movq 3*8(%rsi), %r8
22070 leaq 4*8(%rsi), %rsi
22071
22072 movq %r11, 0*8(%rdi)
22073- movq %r10, 1*8(%rdi)
22074+ movq %rcx, 1*8(%rdi)
22075 movq %r9, 2*8(%rdi)
22076 movq %r8, 3*8(%rdi)
22077 leaq 4*8(%rdi), %rdi
22078@@ -81,10 +81,10 @@ ENTRY(memmove)
22079 4:
22080 movq %rdx, %rcx
22081 movq -8(%rsi, %rdx), %r11
22082- lea -8(%rdi, %rdx), %r10
22083+ lea -8(%rdi, %rdx), %r9
22084 shrq $3, %rcx
22085 rep movsq
22086- movq %r11, (%r10)
22087+ movq %r11, (%r9)
22088 jmp 13f
22089 .Lmemmove_end_forward:
22090
22091@@ -95,14 +95,14 @@ ENTRY(memmove)
22092 7:
22093 movq %rdx, %rcx
22094 movq (%rsi), %r11
22095- movq %rdi, %r10
22096+ movq %rdi, %r9
22097 leaq -8(%rsi, %rdx), %rsi
22098 leaq -8(%rdi, %rdx), %rdi
22099 shrq $3, %rcx
22100 std
22101 rep movsq
22102 cld
22103- movq %r11, (%r10)
22104+ movq %r11, (%r9)
22105 jmp 13f
22106
22107 /*
22108@@ -127,13 +127,13 @@ ENTRY(memmove)
22109 8:
22110 subq $0x20, %rdx
22111 movq -1*8(%rsi), %r11
22112- movq -2*8(%rsi), %r10
22113+ movq -2*8(%rsi), %rcx
22114 movq -3*8(%rsi), %r9
22115 movq -4*8(%rsi), %r8
22116 leaq -4*8(%rsi), %rsi
22117
22118 movq %r11, -1*8(%rdi)
22119- movq %r10, -2*8(%rdi)
22120+ movq %rcx, -2*8(%rdi)
22121 movq %r9, -3*8(%rdi)
22122 movq %r8, -4*8(%rdi)
22123 leaq -4*8(%rdi), %rdi
22124@@ -151,11 +151,11 @@ ENTRY(memmove)
22125 * Move data from 16 bytes to 31 bytes.
22126 */
22127 movq 0*8(%rsi), %r11
22128- movq 1*8(%rsi), %r10
22129+ movq 1*8(%rsi), %rcx
22130 movq -2*8(%rsi, %rdx), %r9
22131 movq -1*8(%rsi, %rdx), %r8
22132 movq %r11, 0*8(%rdi)
22133- movq %r10, 1*8(%rdi)
22134+ movq %rcx, 1*8(%rdi)
22135 movq %r9, -2*8(%rdi, %rdx)
22136 movq %r8, -1*8(%rdi, %rdx)
22137 jmp 13f
22138@@ -167,9 +167,9 @@ ENTRY(memmove)
22139 * Move data from 8 bytes to 15 bytes.
22140 */
22141 movq 0*8(%rsi), %r11
22142- movq -1*8(%rsi, %rdx), %r10
22143+ movq -1*8(%rsi, %rdx), %r9
22144 movq %r11, 0*8(%rdi)
22145- movq %r10, -1*8(%rdi, %rdx)
22146+ movq %r9, -1*8(%rdi, %rdx)
22147 jmp 13f
22148 10:
22149 cmpq $4, %rdx
22150@@ -178,9 +178,9 @@ ENTRY(memmove)
22151 * Move data from 4 bytes to 7 bytes.
22152 */
22153 movl (%rsi), %r11d
22154- movl -4(%rsi, %rdx), %r10d
22155+ movl -4(%rsi, %rdx), %r9d
22156 movl %r11d, (%rdi)
22157- movl %r10d, -4(%rdi, %rdx)
22158+ movl %r9d, -4(%rdi, %rdx)
22159 jmp 13f
22160 11:
22161 cmp $2, %rdx
22162@@ -189,9 +189,9 @@ ENTRY(memmove)
22163 * Move data from 2 bytes to 3 bytes.
22164 */
22165 movw (%rsi), %r11w
22166- movw -2(%rsi, %rdx), %r10w
22167+ movw -2(%rsi, %rdx), %r9w
22168 movw %r11w, (%rdi)
22169- movw %r10w, -2(%rdi, %rdx)
22170+ movw %r9w, -2(%rdi, %rdx)
22171 jmp 13f
22172 12:
22173 cmp $1, %rdx
22174@@ -202,6 +202,7 @@ ENTRY(memmove)
22175 movb (%rsi), %r11b
22176 movb %r11b, (%rdi)
22177 13:
22178+ pax_force_retaddr
22179 retq
22180 CFI_ENDPROC
22181
22182@@ -210,6 +211,7 @@ ENTRY(memmove)
22183 /* Forward moving data. */
22184 movq %rdx, %rcx
22185 rep movsb
22186+ pax_force_retaddr
22187 retq
22188 .Lmemmove_end_forward_efs:
22189 .previous
22190diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22191index 2dcb380..963660a 100644
22192--- a/arch/x86/lib/memset_64.S
22193+++ b/arch/x86/lib/memset_64.S
22194@@ -30,6 +30,7 @@
22195 movl %edx,%ecx
22196 rep stosb
22197 movq %r9,%rax
22198+ pax_force_retaddr
22199 ret
22200 .Lmemset_e:
22201 .previous
22202@@ -52,6 +53,7 @@
22203 movq %rdx,%rcx
22204 rep stosb
22205 movq %r9,%rax
22206+ pax_force_retaddr
22207 ret
22208 .Lmemset_e_e:
22209 .previous
22210@@ -59,7 +61,7 @@
22211 ENTRY(memset)
22212 ENTRY(__memset)
22213 CFI_STARTPROC
22214- movq %rdi,%r10
22215+ movq %rdi,%r11
22216
22217 /* expand byte value */
22218 movzbl %sil,%ecx
22219@@ -117,7 +119,8 @@ ENTRY(__memset)
22220 jnz .Lloop_1
22221
22222 .Lende:
22223- movq %r10,%rax
22224+ movq %r11,%rax
22225+ pax_force_retaddr
22226 ret
22227
22228 CFI_RESTORE_STATE
22229diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22230index c9f2d9b..e7fd2c0 100644
22231--- a/arch/x86/lib/mmx_32.c
22232+++ b/arch/x86/lib/mmx_32.c
22233@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22234 {
22235 void *p;
22236 int i;
22237+ unsigned long cr0;
22238
22239 if (unlikely(in_interrupt()))
22240 return __memcpy(to, from, len);
22241@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22242 kernel_fpu_begin();
22243
22244 __asm__ __volatile__ (
22245- "1: prefetch (%0)\n" /* This set is 28 bytes */
22246- " prefetch 64(%0)\n"
22247- " prefetch 128(%0)\n"
22248- " prefetch 192(%0)\n"
22249- " prefetch 256(%0)\n"
22250+ "1: prefetch (%1)\n" /* This set is 28 bytes */
22251+ " prefetch 64(%1)\n"
22252+ " prefetch 128(%1)\n"
22253+ " prefetch 192(%1)\n"
22254+ " prefetch 256(%1)\n"
22255 "2: \n"
22256 ".section .fixup, \"ax\"\n"
22257- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22258+ "3: \n"
22259+
22260+#ifdef CONFIG_PAX_KERNEXEC
22261+ " movl %%cr0, %0\n"
22262+ " movl %0, %%eax\n"
22263+ " andl $0xFFFEFFFF, %%eax\n"
22264+ " movl %%eax, %%cr0\n"
22265+#endif
22266+
22267+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22268+
22269+#ifdef CONFIG_PAX_KERNEXEC
22270+ " movl %0, %%cr0\n"
22271+#endif
22272+
22273 " jmp 2b\n"
22274 ".previous\n"
22275 _ASM_EXTABLE(1b, 3b)
22276- : : "r" (from));
22277+ : "=&r" (cr0) : "r" (from) : "ax");
22278
22279 for ( ; i > 5; i--) {
22280 __asm__ __volatile__ (
22281- "1: prefetch 320(%0)\n"
22282- "2: movq (%0), %%mm0\n"
22283- " movq 8(%0), %%mm1\n"
22284- " movq 16(%0), %%mm2\n"
22285- " movq 24(%0), %%mm3\n"
22286- " movq %%mm0, (%1)\n"
22287- " movq %%mm1, 8(%1)\n"
22288- " movq %%mm2, 16(%1)\n"
22289- " movq %%mm3, 24(%1)\n"
22290- " movq 32(%0), %%mm0\n"
22291- " movq 40(%0), %%mm1\n"
22292- " movq 48(%0), %%mm2\n"
22293- " movq 56(%0), %%mm3\n"
22294- " movq %%mm0, 32(%1)\n"
22295- " movq %%mm1, 40(%1)\n"
22296- " movq %%mm2, 48(%1)\n"
22297- " movq %%mm3, 56(%1)\n"
22298+ "1: prefetch 320(%1)\n"
22299+ "2: movq (%1), %%mm0\n"
22300+ " movq 8(%1), %%mm1\n"
22301+ " movq 16(%1), %%mm2\n"
22302+ " movq 24(%1), %%mm3\n"
22303+ " movq %%mm0, (%2)\n"
22304+ " movq %%mm1, 8(%2)\n"
22305+ " movq %%mm2, 16(%2)\n"
22306+ " movq %%mm3, 24(%2)\n"
22307+ " movq 32(%1), %%mm0\n"
22308+ " movq 40(%1), %%mm1\n"
22309+ " movq 48(%1), %%mm2\n"
22310+ " movq 56(%1), %%mm3\n"
22311+ " movq %%mm0, 32(%2)\n"
22312+ " movq %%mm1, 40(%2)\n"
22313+ " movq %%mm2, 48(%2)\n"
22314+ " movq %%mm3, 56(%2)\n"
22315 ".section .fixup, \"ax\"\n"
22316- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22317+ "3:\n"
22318+
22319+#ifdef CONFIG_PAX_KERNEXEC
22320+ " movl %%cr0, %0\n"
22321+ " movl %0, %%eax\n"
22322+ " andl $0xFFFEFFFF, %%eax\n"
22323+ " movl %%eax, %%cr0\n"
22324+#endif
22325+
22326+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22327+
22328+#ifdef CONFIG_PAX_KERNEXEC
22329+ " movl %0, %%cr0\n"
22330+#endif
22331+
22332 " jmp 2b\n"
22333 ".previous\n"
22334 _ASM_EXTABLE(1b, 3b)
22335- : : "r" (from), "r" (to) : "memory");
22336+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22337
22338 from += 64;
22339 to += 64;
22340@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22341 static void fast_copy_page(void *to, void *from)
22342 {
22343 int i;
22344+ unsigned long cr0;
22345
22346 kernel_fpu_begin();
22347
22348@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22349 * but that is for later. -AV
22350 */
22351 __asm__ __volatile__(
22352- "1: prefetch (%0)\n"
22353- " prefetch 64(%0)\n"
22354- " prefetch 128(%0)\n"
22355- " prefetch 192(%0)\n"
22356- " prefetch 256(%0)\n"
22357+ "1: prefetch (%1)\n"
22358+ " prefetch 64(%1)\n"
22359+ " prefetch 128(%1)\n"
22360+ " prefetch 192(%1)\n"
22361+ " prefetch 256(%1)\n"
22362 "2: \n"
22363 ".section .fixup, \"ax\"\n"
22364- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22365+ "3: \n"
22366+
22367+#ifdef CONFIG_PAX_KERNEXEC
22368+ " movl %%cr0, %0\n"
22369+ " movl %0, %%eax\n"
22370+ " andl $0xFFFEFFFF, %%eax\n"
22371+ " movl %%eax, %%cr0\n"
22372+#endif
22373+
22374+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22375+
22376+#ifdef CONFIG_PAX_KERNEXEC
22377+ " movl %0, %%cr0\n"
22378+#endif
22379+
22380 " jmp 2b\n"
22381 ".previous\n"
22382- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22383+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22384
22385 for (i = 0; i < (4096-320)/64; i++) {
22386 __asm__ __volatile__ (
22387- "1: prefetch 320(%0)\n"
22388- "2: movq (%0), %%mm0\n"
22389- " movntq %%mm0, (%1)\n"
22390- " movq 8(%0), %%mm1\n"
22391- " movntq %%mm1, 8(%1)\n"
22392- " movq 16(%0), %%mm2\n"
22393- " movntq %%mm2, 16(%1)\n"
22394- " movq 24(%0), %%mm3\n"
22395- " movntq %%mm3, 24(%1)\n"
22396- " movq 32(%0), %%mm4\n"
22397- " movntq %%mm4, 32(%1)\n"
22398- " movq 40(%0), %%mm5\n"
22399- " movntq %%mm5, 40(%1)\n"
22400- " movq 48(%0), %%mm6\n"
22401- " movntq %%mm6, 48(%1)\n"
22402- " movq 56(%0), %%mm7\n"
22403- " movntq %%mm7, 56(%1)\n"
22404+ "1: prefetch 320(%1)\n"
22405+ "2: movq (%1), %%mm0\n"
22406+ " movntq %%mm0, (%2)\n"
22407+ " movq 8(%1), %%mm1\n"
22408+ " movntq %%mm1, 8(%2)\n"
22409+ " movq 16(%1), %%mm2\n"
22410+ " movntq %%mm2, 16(%2)\n"
22411+ " movq 24(%1), %%mm3\n"
22412+ " movntq %%mm3, 24(%2)\n"
22413+ " movq 32(%1), %%mm4\n"
22414+ " movntq %%mm4, 32(%2)\n"
22415+ " movq 40(%1), %%mm5\n"
22416+ " movntq %%mm5, 40(%2)\n"
22417+ " movq 48(%1), %%mm6\n"
22418+ " movntq %%mm6, 48(%2)\n"
22419+ " movq 56(%1), %%mm7\n"
22420+ " movntq %%mm7, 56(%2)\n"
22421 ".section .fixup, \"ax\"\n"
22422- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22423+ "3:\n"
22424+
22425+#ifdef CONFIG_PAX_KERNEXEC
22426+ " movl %%cr0, %0\n"
22427+ " movl %0, %%eax\n"
22428+ " andl $0xFFFEFFFF, %%eax\n"
22429+ " movl %%eax, %%cr0\n"
22430+#endif
22431+
22432+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22433+
22434+#ifdef CONFIG_PAX_KERNEXEC
22435+ " movl %0, %%cr0\n"
22436+#endif
22437+
22438 " jmp 2b\n"
22439 ".previous\n"
22440- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22441+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22442
22443 from += 64;
22444 to += 64;
22445@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22446 static void fast_copy_page(void *to, void *from)
22447 {
22448 int i;
22449+ unsigned long cr0;
22450
22451 kernel_fpu_begin();
22452
22453 __asm__ __volatile__ (
22454- "1: prefetch (%0)\n"
22455- " prefetch 64(%0)\n"
22456- " prefetch 128(%0)\n"
22457- " prefetch 192(%0)\n"
22458- " prefetch 256(%0)\n"
22459+ "1: prefetch (%1)\n"
22460+ " prefetch 64(%1)\n"
22461+ " prefetch 128(%1)\n"
22462+ " prefetch 192(%1)\n"
22463+ " prefetch 256(%1)\n"
22464 "2: \n"
22465 ".section .fixup, \"ax\"\n"
22466- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22467+ "3: \n"
22468+
22469+#ifdef CONFIG_PAX_KERNEXEC
22470+ " movl %%cr0, %0\n"
22471+ " movl %0, %%eax\n"
22472+ " andl $0xFFFEFFFF, %%eax\n"
22473+ " movl %%eax, %%cr0\n"
22474+#endif
22475+
22476+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22477+
22478+#ifdef CONFIG_PAX_KERNEXEC
22479+ " movl %0, %%cr0\n"
22480+#endif
22481+
22482 " jmp 2b\n"
22483 ".previous\n"
22484- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22485+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22486
22487 for (i = 0; i < 4096/64; i++) {
22488 __asm__ __volatile__ (
22489- "1: prefetch 320(%0)\n"
22490- "2: movq (%0), %%mm0\n"
22491- " movq 8(%0), %%mm1\n"
22492- " movq 16(%0), %%mm2\n"
22493- " movq 24(%0), %%mm3\n"
22494- " movq %%mm0, (%1)\n"
22495- " movq %%mm1, 8(%1)\n"
22496- " movq %%mm2, 16(%1)\n"
22497- " movq %%mm3, 24(%1)\n"
22498- " movq 32(%0), %%mm0\n"
22499- " movq 40(%0), %%mm1\n"
22500- " movq 48(%0), %%mm2\n"
22501- " movq 56(%0), %%mm3\n"
22502- " movq %%mm0, 32(%1)\n"
22503- " movq %%mm1, 40(%1)\n"
22504- " movq %%mm2, 48(%1)\n"
22505- " movq %%mm3, 56(%1)\n"
22506+ "1: prefetch 320(%1)\n"
22507+ "2: movq (%1), %%mm0\n"
22508+ " movq 8(%1), %%mm1\n"
22509+ " movq 16(%1), %%mm2\n"
22510+ " movq 24(%1), %%mm3\n"
22511+ " movq %%mm0, (%2)\n"
22512+ " movq %%mm1, 8(%2)\n"
22513+ " movq %%mm2, 16(%2)\n"
22514+ " movq %%mm3, 24(%2)\n"
22515+ " movq 32(%1), %%mm0\n"
22516+ " movq 40(%1), %%mm1\n"
22517+ " movq 48(%1), %%mm2\n"
22518+ " movq 56(%1), %%mm3\n"
22519+ " movq %%mm0, 32(%2)\n"
22520+ " movq %%mm1, 40(%2)\n"
22521+ " movq %%mm2, 48(%2)\n"
22522+ " movq %%mm3, 56(%2)\n"
22523 ".section .fixup, \"ax\"\n"
22524- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22525+ "3:\n"
22526+
22527+#ifdef CONFIG_PAX_KERNEXEC
22528+ " movl %%cr0, %0\n"
22529+ " movl %0, %%eax\n"
22530+ " andl $0xFFFEFFFF, %%eax\n"
22531+ " movl %%eax, %%cr0\n"
22532+#endif
22533+
22534+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22535+
22536+#ifdef CONFIG_PAX_KERNEXEC
22537+ " movl %0, %%cr0\n"
22538+#endif
22539+
22540 " jmp 2b\n"
22541 ".previous\n"
22542 _ASM_EXTABLE(1b, 3b)
22543- : : "r" (from), "r" (to) : "memory");
22544+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22545
22546 from += 64;
22547 to += 64;
22548diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22549index 69fa106..adda88b 100644
22550--- a/arch/x86/lib/msr-reg.S
22551+++ b/arch/x86/lib/msr-reg.S
22552@@ -3,6 +3,7 @@
22553 #include <asm/dwarf2.h>
22554 #include <asm/asm.h>
22555 #include <asm/msr.h>
22556+#include <asm/alternative-asm.h>
22557
22558 #ifdef CONFIG_X86_64
22559 /*
22560@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22561 CFI_STARTPROC
22562 pushq_cfi %rbx
22563 pushq_cfi %rbp
22564- movq %rdi, %r10 /* Save pointer */
22565+ movq %rdi, %r9 /* Save pointer */
22566 xorl %r11d, %r11d /* Return value */
22567 movl (%rdi), %eax
22568 movl 4(%rdi), %ecx
22569@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22570 movl 28(%rdi), %edi
22571 CFI_REMEMBER_STATE
22572 1: \op
22573-2: movl %eax, (%r10)
22574+2: movl %eax, (%r9)
22575 movl %r11d, %eax /* Return value */
22576- movl %ecx, 4(%r10)
22577- movl %edx, 8(%r10)
22578- movl %ebx, 12(%r10)
22579- movl %ebp, 20(%r10)
22580- movl %esi, 24(%r10)
22581- movl %edi, 28(%r10)
22582+ movl %ecx, 4(%r9)
22583+ movl %edx, 8(%r9)
22584+ movl %ebx, 12(%r9)
22585+ movl %ebp, 20(%r9)
22586+ movl %esi, 24(%r9)
22587+ movl %edi, 28(%r9)
22588 popq_cfi %rbp
22589 popq_cfi %rbx
22590+ pax_force_retaddr
22591 ret
22592 3:
22593 CFI_RESTORE_STATE
22594diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22595index 36b0d15..d381858 100644
22596--- a/arch/x86/lib/putuser.S
22597+++ b/arch/x86/lib/putuser.S
22598@@ -15,7 +15,9 @@
22599 #include <asm/thread_info.h>
22600 #include <asm/errno.h>
22601 #include <asm/asm.h>
22602-
22603+#include <asm/segment.h>
22604+#include <asm/pgtable.h>
22605+#include <asm/alternative-asm.h>
22606
22607 /*
22608 * __put_user_X
22609@@ -29,52 +31,119 @@
22610 * as they get called from within inline assembly.
22611 */
22612
22613-#define ENTER CFI_STARTPROC ; \
22614- GET_THREAD_INFO(%_ASM_BX)
22615-#define EXIT ret ; \
22616+#define ENTER CFI_STARTPROC
22617+#define EXIT pax_force_retaddr; ret ; \
22618 CFI_ENDPROC
22619
22620+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22621+#define _DEST %_ASM_CX,%_ASM_BX
22622+#else
22623+#define _DEST %_ASM_CX
22624+#endif
22625+
22626+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22627+#define __copyuser_seg gs;
22628+#else
22629+#define __copyuser_seg
22630+#endif
22631+
22632 .text
22633 ENTRY(__put_user_1)
22634 ENTER
22635+
22636+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22637+ GET_THREAD_INFO(%_ASM_BX)
22638 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22639 jae bad_put_user
22640-1: movb %al,(%_ASM_CX)
22641+
22642+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22643+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22644+ cmp %_ASM_BX,%_ASM_CX
22645+ jb 1234f
22646+ xor %ebx,%ebx
22647+1234:
22648+#endif
22649+
22650+#endif
22651+
22652+1: __copyuser_seg movb %al,(_DEST)
22653 xor %eax,%eax
22654 EXIT
22655 ENDPROC(__put_user_1)
22656
22657 ENTRY(__put_user_2)
22658 ENTER
22659+
22660+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22661+ GET_THREAD_INFO(%_ASM_BX)
22662 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22663 sub $1,%_ASM_BX
22664 cmp %_ASM_BX,%_ASM_CX
22665 jae bad_put_user
22666-2: movw %ax,(%_ASM_CX)
22667+
22668+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22669+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22670+ cmp %_ASM_BX,%_ASM_CX
22671+ jb 1234f
22672+ xor %ebx,%ebx
22673+1234:
22674+#endif
22675+
22676+#endif
22677+
22678+2: __copyuser_seg movw %ax,(_DEST)
22679 xor %eax,%eax
22680 EXIT
22681 ENDPROC(__put_user_2)
22682
22683 ENTRY(__put_user_4)
22684 ENTER
22685+
22686+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22687+ GET_THREAD_INFO(%_ASM_BX)
22688 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22689 sub $3,%_ASM_BX
22690 cmp %_ASM_BX,%_ASM_CX
22691 jae bad_put_user
22692-3: movl %eax,(%_ASM_CX)
22693+
22694+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22695+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22696+ cmp %_ASM_BX,%_ASM_CX
22697+ jb 1234f
22698+ xor %ebx,%ebx
22699+1234:
22700+#endif
22701+
22702+#endif
22703+
22704+3: __copyuser_seg movl %eax,(_DEST)
22705 xor %eax,%eax
22706 EXIT
22707 ENDPROC(__put_user_4)
22708
22709 ENTRY(__put_user_8)
22710 ENTER
22711+
22712+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22713+ GET_THREAD_INFO(%_ASM_BX)
22714 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22715 sub $7,%_ASM_BX
22716 cmp %_ASM_BX,%_ASM_CX
22717 jae bad_put_user
22718-4: mov %_ASM_AX,(%_ASM_CX)
22719+
22720+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22721+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22722+ cmp %_ASM_BX,%_ASM_CX
22723+ jb 1234f
22724+ xor %ebx,%ebx
22725+1234:
22726+#endif
22727+
22728+#endif
22729+
22730+4: __copyuser_seg mov %_ASM_AX,(_DEST)
22731 #ifdef CONFIG_X86_32
22732-5: movl %edx,4(%_ASM_CX)
22733+5: __copyuser_seg movl %edx,4(_DEST)
22734 #endif
22735 xor %eax,%eax
22736 EXIT
22737diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
22738index 1cad221..de671ee 100644
22739--- a/arch/x86/lib/rwlock.S
22740+++ b/arch/x86/lib/rwlock.S
22741@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
22742 FRAME
22743 0: LOCK_PREFIX
22744 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22745+
22746+#ifdef CONFIG_PAX_REFCOUNT
22747+ jno 1234f
22748+ LOCK_PREFIX
22749+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22750+ int $4
22751+1234:
22752+ _ASM_EXTABLE(1234b, 1234b)
22753+#endif
22754+
22755 1: rep; nop
22756 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
22757 jne 1b
22758 LOCK_PREFIX
22759 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22760+
22761+#ifdef CONFIG_PAX_REFCOUNT
22762+ jno 1234f
22763+ LOCK_PREFIX
22764+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22765+ int $4
22766+1234:
22767+ _ASM_EXTABLE(1234b, 1234b)
22768+#endif
22769+
22770 jnz 0b
22771 ENDFRAME
22772+ pax_force_retaddr
22773 ret
22774 CFI_ENDPROC
22775 END(__write_lock_failed)
22776@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
22777 FRAME
22778 0: LOCK_PREFIX
22779 READ_LOCK_SIZE(inc) (%__lock_ptr)
22780+
22781+#ifdef CONFIG_PAX_REFCOUNT
22782+ jno 1234f
22783+ LOCK_PREFIX
22784+ READ_LOCK_SIZE(dec) (%__lock_ptr)
22785+ int $4
22786+1234:
22787+ _ASM_EXTABLE(1234b, 1234b)
22788+#endif
22789+
22790 1: rep; nop
22791 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
22792 js 1b
22793 LOCK_PREFIX
22794 READ_LOCK_SIZE(dec) (%__lock_ptr)
22795+
22796+#ifdef CONFIG_PAX_REFCOUNT
22797+ jno 1234f
22798+ LOCK_PREFIX
22799+ READ_LOCK_SIZE(inc) (%__lock_ptr)
22800+ int $4
22801+1234:
22802+ _ASM_EXTABLE(1234b, 1234b)
22803+#endif
22804+
22805 js 0b
22806 ENDFRAME
22807+ pax_force_retaddr
22808 ret
22809 CFI_ENDPROC
22810 END(__read_lock_failed)
22811diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
22812index 5dff5f0..cadebf4 100644
22813--- a/arch/x86/lib/rwsem.S
22814+++ b/arch/x86/lib/rwsem.S
22815@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
22816 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22817 CFI_RESTORE __ASM_REG(dx)
22818 restore_common_regs
22819+ pax_force_retaddr
22820 ret
22821 CFI_ENDPROC
22822 ENDPROC(call_rwsem_down_read_failed)
22823@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
22824 movq %rax,%rdi
22825 call rwsem_down_write_failed
22826 restore_common_regs
22827+ pax_force_retaddr
22828 ret
22829 CFI_ENDPROC
22830 ENDPROC(call_rwsem_down_write_failed)
22831@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
22832 movq %rax,%rdi
22833 call rwsem_wake
22834 restore_common_regs
22835-1: ret
22836+1: pax_force_retaddr
22837+ ret
22838 CFI_ENDPROC
22839 ENDPROC(call_rwsem_wake)
22840
22841@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
22842 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22843 CFI_RESTORE __ASM_REG(dx)
22844 restore_common_regs
22845+ pax_force_retaddr
22846 ret
22847 CFI_ENDPROC
22848 ENDPROC(call_rwsem_downgrade_wake)
22849diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22850index a63efd6..ccecad8 100644
22851--- a/arch/x86/lib/thunk_64.S
22852+++ b/arch/x86/lib/thunk_64.S
22853@@ -8,6 +8,7 @@
22854 #include <linux/linkage.h>
22855 #include <asm/dwarf2.h>
22856 #include <asm/calling.h>
22857+#include <asm/alternative-asm.h>
22858
22859 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22860 .macro THUNK name, func, put_ret_addr_in_rdi=0
22861@@ -41,5 +42,6 @@
22862 SAVE_ARGS
22863 restore:
22864 RESTORE_ARGS
22865+ pax_force_retaddr
22866 ret
22867 CFI_ENDPROC
22868diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
22869index ef2a6a5..3b28862 100644
22870--- a/arch/x86/lib/usercopy_32.c
22871+++ b/arch/x86/lib/usercopy_32.c
22872@@ -41,10 +41,12 @@ do { \
22873 int __d0; \
22874 might_fault(); \
22875 __asm__ __volatile__( \
22876+ __COPYUSER_SET_ES \
22877 "0: rep; stosl\n" \
22878 " movl %2,%0\n" \
22879 "1: rep; stosb\n" \
22880 "2:\n" \
22881+ __COPYUSER_RESTORE_ES \
22882 ".section .fixup,\"ax\"\n" \
22883 "3: lea 0(%2,%0,4),%0\n" \
22884 " jmp 2b\n" \
22885@@ -113,6 +115,7 @@ long strnlen_user(const char __user *s, long n)
22886 might_fault();
22887
22888 __asm__ __volatile__(
22889+ __COPYUSER_SET_ES
22890 " testl %0, %0\n"
22891 " jz 3f\n"
22892 " andl %0,%%ecx\n"
22893@@ -121,6 +124,7 @@ long strnlen_user(const char __user *s, long n)
22894 " subl %%ecx,%0\n"
22895 " addl %0,%%eax\n"
22896 "1:\n"
22897+ __COPYUSER_RESTORE_ES
22898 ".section .fixup,\"ax\"\n"
22899 "2: xorl %%eax,%%eax\n"
22900 " jmp 1b\n"
22901@@ -140,7 +144,7 @@ EXPORT_SYMBOL(strnlen_user);
22902
22903 #ifdef CONFIG_X86_INTEL_USERCOPY
22904 static unsigned long
22905-__copy_user_intel(void __user *to, const void *from, unsigned long size)
22906+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
22907 {
22908 int d0, d1;
22909 __asm__ __volatile__(
22910@@ -152,36 +156,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22911 " .align 2,0x90\n"
22912 "3: movl 0(%4), %%eax\n"
22913 "4: movl 4(%4), %%edx\n"
22914- "5: movl %%eax, 0(%3)\n"
22915- "6: movl %%edx, 4(%3)\n"
22916+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
22917+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
22918 "7: movl 8(%4), %%eax\n"
22919 "8: movl 12(%4),%%edx\n"
22920- "9: movl %%eax, 8(%3)\n"
22921- "10: movl %%edx, 12(%3)\n"
22922+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
22923+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
22924 "11: movl 16(%4), %%eax\n"
22925 "12: movl 20(%4), %%edx\n"
22926- "13: movl %%eax, 16(%3)\n"
22927- "14: movl %%edx, 20(%3)\n"
22928+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
22929+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
22930 "15: movl 24(%4), %%eax\n"
22931 "16: movl 28(%4), %%edx\n"
22932- "17: movl %%eax, 24(%3)\n"
22933- "18: movl %%edx, 28(%3)\n"
22934+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
22935+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
22936 "19: movl 32(%4), %%eax\n"
22937 "20: movl 36(%4), %%edx\n"
22938- "21: movl %%eax, 32(%3)\n"
22939- "22: movl %%edx, 36(%3)\n"
22940+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
22941+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
22942 "23: movl 40(%4), %%eax\n"
22943 "24: movl 44(%4), %%edx\n"
22944- "25: movl %%eax, 40(%3)\n"
22945- "26: movl %%edx, 44(%3)\n"
22946+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
22947+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
22948 "27: movl 48(%4), %%eax\n"
22949 "28: movl 52(%4), %%edx\n"
22950- "29: movl %%eax, 48(%3)\n"
22951- "30: movl %%edx, 52(%3)\n"
22952+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
22953+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
22954 "31: movl 56(%4), %%eax\n"
22955 "32: movl 60(%4), %%edx\n"
22956- "33: movl %%eax, 56(%3)\n"
22957- "34: movl %%edx, 60(%3)\n"
22958+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
22959+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
22960 " addl $-64, %0\n"
22961 " addl $64, %4\n"
22962 " addl $64, %3\n"
22963@@ -191,10 +195,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22964 " shrl $2, %0\n"
22965 " andl $3, %%eax\n"
22966 " cld\n"
22967+ __COPYUSER_SET_ES
22968 "99: rep; movsl\n"
22969 "36: movl %%eax, %0\n"
22970 "37: rep; movsb\n"
22971 "100:\n"
22972+ __COPYUSER_RESTORE_ES
22973 ".section .fixup,\"ax\"\n"
22974 "101: lea 0(%%eax,%0,4),%0\n"
22975 " jmp 100b\n"
22976@@ -247,46 +253,155 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22977 }
22978
22979 static unsigned long
22980+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
22981+{
22982+ int d0, d1;
22983+ __asm__ __volatile__(
22984+ " .align 2,0x90\n"
22985+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
22986+ " cmpl $67, %0\n"
22987+ " jbe 3f\n"
22988+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
22989+ " .align 2,0x90\n"
22990+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
22991+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
22992+ "5: movl %%eax, 0(%3)\n"
22993+ "6: movl %%edx, 4(%3)\n"
22994+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
22995+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
22996+ "9: movl %%eax, 8(%3)\n"
22997+ "10: movl %%edx, 12(%3)\n"
22998+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
22999+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23000+ "13: movl %%eax, 16(%3)\n"
23001+ "14: movl %%edx, 20(%3)\n"
23002+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23003+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23004+ "17: movl %%eax, 24(%3)\n"
23005+ "18: movl %%edx, 28(%3)\n"
23006+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23007+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23008+ "21: movl %%eax, 32(%3)\n"
23009+ "22: movl %%edx, 36(%3)\n"
23010+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23011+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23012+ "25: movl %%eax, 40(%3)\n"
23013+ "26: movl %%edx, 44(%3)\n"
23014+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23015+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23016+ "29: movl %%eax, 48(%3)\n"
23017+ "30: movl %%edx, 52(%3)\n"
23018+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23019+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23020+ "33: movl %%eax, 56(%3)\n"
23021+ "34: movl %%edx, 60(%3)\n"
23022+ " addl $-64, %0\n"
23023+ " addl $64, %4\n"
23024+ " addl $64, %3\n"
23025+ " cmpl $63, %0\n"
23026+ " ja 1b\n"
23027+ "35: movl %0, %%eax\n"
23028+ " shrl $2, %0\n"
23029+ " andl $3, %%eax\n"
23030+ " cld\n"
23031+ "99: rep; "__copyuser_seg" movsl\n"
23032+ "36: movl %%eax, %0\n"
23033+ "37: rep; "__copyuser_seg" movsb\n"
23034+ "100:\n"
23035+ ".section .fixup,\"ax\"\n"
23036+ "101: lea 0(%%eax,%0,4),%0\n"
23037+ " jmp 100b\n"
23038+ ".previous\n"
23039+ ".section __ex_table,\"a\"\n"
23040+ " .align 4\n"
23041+ " .long 1b,100b\n"
23042+ " .long 2b,100b\n"
23043+ " .long 3b,100b\n"
23044+ " .long 4b,100b\n"
23045+ " .long 5b,100b\n"
23046+ " .long 6b,100b\n"
23047+ " .long 7b,100b\n"
23048+ " .long 8b,100b\n"
23049+ " .long 9b,100b\n"
23050+ " .long 10b,100b\n"
23051+ " .long 11b,100b\n"
23052+ " .long 12b,100b\n"
23053+ " .long 13b,100b\n"
23054+ " .long 14b,100b\n"
23055+ " .long 15b,100b\n"
23056+ " .long 16b,100b\n"
23057+ " .long 17b,100b\n"
23058+ " .long 18b,100b\n"
23059+ " .long 19b,100b\n"
23060+ " .long 20b,100b\n"
23061+ " .long 21b,100b\n"
23062+ " .long 22b,100b\n"
23063+ " .long 23b,100b\n"
23064+ " .long 24b,100b\n"
23065+ " .long 25b,100b\n"
23066+ " .long 26b,100b\n"
23067+ " .long 27b,100b\n"
23068+ " .long 28b,100b\n"
23069+ " .long 29b,100b\n"
23070+ " .long 30b,100b\n"
23071+ " .long 31b,100b\n"
23072+ " .long 32b,100b\n"
23073+ " .long 33b,100b\n"
23074+ " .long 34b,100b\n"
23075+ " .long 35b,100b\n"
23076+ " .long 36b,100b\n"
23077+ " .long 37b,100b\n"
23078+ " .long 99b,101b\n"
23079+ ".previous"
23080+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
23081+ : "1"(to), "2"(from), "0"(size)
23082+ : "eax", "edx", "memory");
23083+ return size;
23084+}
23085+
23086+static unsigned long
23087+__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
23088+static unsigned long
23089 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23090 {
23091 int d0, d1;
23092 __asm__ __volatile__(
23093 " .align 2,0x90\n"
23094- "0: movl 32(%4), %%eax\n"
23095+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23096 " cmpl $67, %0\n"
23097 " jbe 2f\n"
23098- "1: movl 64(%4), %%eax\n"
23099+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23100 " .align 2,0x90\n"
23101- "2: movl 0(%4), %%eax\n"
23102- "21: movl 4(%4), %%edx\n"
23103+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23104+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23105 " movl %%eax, 0(%3)\n"
23106 " movl %%edx, 4(%3)\n"
23107- "3: movl 8(%4), %%eax\n"
23108- "31: movl 12(%4),%%edx\n"
23109+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23110+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23111 " movl %%eax, 8(%3)\n"
23112 " movl %%edx, 12(%3)\n"
23113- "4: movl 16(%4), %%eax\n"
23114- "41: movl 20(%4), %%edx\n"
23115+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23116+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23117 " movl %%eax, 16(%3)\n"
23118 " movl %%edx, 20(%3)\n"
23119- "10: movl 24(%4), %%eax\n"
23120- "51: movl 28(%4), %%edx\n"
23121+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23122+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23123 " movl %%eax, 24(%3)\n"
23124 " movl %%edx, 28(%3)\n"
23125- "11: movl 32(%4), %%eax\n"
23126- "61: movl 36(%4), %%edx\n"
23127+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23128+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23129 " movl %%eax, 32(%3)\n"
23130 " movl %%edx, 36(%3)\n"
23131- "12: movl 40(%4), %%eax\n"
23132- "71: movl 44(%4), %%edx\n"
23133+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23134+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23135 " movl %%eax, 40(%3)\n"
23136 " movl %%edx, 44(%3)\n"
23137- "13: movl 48(%4), %%eax\n"
23138- "81: movl 52(%4), %%edx\n"
23139+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23140+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23141 " movl %%eax, 48(%3)\n"
23142 " movl %%edx, 52(%3)\n"
23143- "14: movl 56(%4), %%eax\n"
23144- "91: movl 60(%4), %%edx\n"
23145+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23146+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23147 " movl %%eax, 56(%3)\n"
23148 " movl %%edx, 60(%3)\n"
23149 " addl $-64, %0\n"
23150@@ -298,9 +413,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23151 " shrl $2, %0\n"
23152 " andl $3, %%eax\n"
23153 " cld\n"
23154- "6: rep; movsl\n"
23155+ "6: rep; "__copyuser_seg" movsl\n"
23156 " movl %%eax,%0\n"
23157- "7: rep; movsb\n"
23158+ "7: rep; "__copyuser_seg" movsb\n"
23159 "8:\n"
23160 ".section .fixup,\"ax\"\n"
23161 "9: lea 0(%%eax,%0,4),%0\n"
23162@@ -347,47 +462,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23163 */
23164
23165 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23166+ const void __user *from, unsigned long size) __size_overflow(3);
23167+static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23168 const void __user *from, unsigned long size)
23169 {
23170 int d0, d1;
23171
23172 __asm__ __volatile__(
23173 " .align 2,0x90\n"
23174- "0: movl 32(%4), %%eax\n"
23175+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23176 " cmpl $67, %0\n"
23177 " jbe 2f\n"
23178- "1: movl 64(%4), %%eax\n"
23179+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23180 " .align 2,0x90\n"
23181- "2: movl 0(%4), %%eax\n"
23182- "21: movl 4(%4), %%edx\n"
23183+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23184+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23185 " movnti %%eax, 0(%3)\n"
23186 " movnti %%edx, 4(%3)\n"
23187- "3: movl 8(%4), %%eax\n"
23188- "31: movl 12(%4),%%edx\n"
23189+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23190+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23191 " movnti %%eax, 8(%3)\n"
23192 " movnti %%edx, 12(%3)\n"
23193- "4: movl 16(%4), %%eax\n"
23194- "41: movl 20(%4), %%edx\n"
23195+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23196+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23197 " movnti %%eax, 16(%3)\n"
23198 " movnti %%edx, 20(%3)\n"
23199- "10: movl 24(%4), %%eax\n"
23200- "51: movl 28(%4), %%edx\n"
23201+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23202+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23203 " movnti %%eax, 24(%3)\n"
23204 " movnti %%edx, 28(%3)\n"
23205- "11: movl 32(%4), %%eax\n"
23206- "61: movl 36(%4), %%edx\n"
23207+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23208+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23209 " movnti %%eax, 32(%3)\n"
23210 " movnti %%edx, 36(%3)\n"
23211- "12: movl 40(%4), %%eax\n"
23212- "71: movl 44(%4), %%edx\n"
23213+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23214+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23215 " movnti %%eax, 40(%3)\n"
23216 " movnti %%edx, 44(%3)\n"
23217- "13: movl 48(%4), %%eax\n"
23218- "81: movl 52(%4), %%edx\n"
23219+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23220+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23221 " movnti %%eax, 48(%3)\n"
23222 " movnti %%edx, 52(%3)\n"
23223- "14: movl 56(%4), %%eax\n"
23224- "91: movl 60(%4), %%edx\n"
23225+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23226+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23227 " movnti %%eax, 56(%3)\n"
23228 " movnti %%edx, 60(%3)\n"
23229 " addl $-64, %0\n"
23230@@ -400,9 +517,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23231 " shrl $2, %0\n"
23232 " andl $3, %%eax\n"
23233 " cld\n"
23234- "6: rep; movsl\n"
23235+ "6: rep; "__copyuser_seg" movsl\n"
23236 " movl %%eax,%0\n"
23237- "7: rep; movsb\n"
23238+ "7: rep; "__copyuser_seg" movsb\n"
23239 "8:\n"
23240 ".section .fixup,\"ax\"\n"
23241 "9: lea 0(%%eax,%0,4),%0\n"
23242@@ -444,47 +561,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23243 }
23244
23245 static unsigned long __copy_user_intel_nocache(void *to,
23246+ const void __user *from, unsigned long size) __size_overflow(3);
23247+static unsigned long __copy_user_intel_nocache(void *to,
23248 const void __user *from, unsigned long size)
23249 {
23250 int d0, d1;
23251
23252 __asm__ __volatile__(
23253 " .align 2,0x90\n"
23254- "0: movl 32(%4), %%eax\n"
23255+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23256 " cmpl $67, %0\n"
23257 " jbe 2f\n"
23258- "1: movl 64(%4), %%eax\n"
23259+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23260 " .align 2,0x90\n"
23261- "2: movl 0(%4), %%eax\n"
23262- "21: movl 4(%4), %%edx\n"
23263+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23264+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23265 " movnti %%eax, 0(%3)\n"
23266 " movnti %%edx, 4(%3)\n"
23267- "3: movl 8(%4), %%eax\n"
23268- "31: movl 12(%4),%%edx\n"
23269+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23270+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23271 " movnti %%eax, 8(%3)\n"
23272 " movnti %%edx, 12(%3)\n"
23273- "4: movl 16(%4), %%eax\n"
23274- "41: movl 20(%4), %%edx\n"
23275+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23276+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23277 " movnti %%eax, 16(%3)\n"
23278 " movnti %%edx, 20(%3)\n"
23279- "10: movl 24(%4), %%eax\n"
23280- "51: movl 28(%4), %%edx\n"
23281+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23282+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23283 " movnti %%eax, 24(%3)\n"
23284 " movnti %%edx, 28(%3)\n"
23285- "11: movl 32(%4), %%eax\n"
23286- "61: movl 36(%4), %%edx\n"
23287+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23288+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23289 " movnti %%eax, 32(%3)\n"
23290 " movnti %%edx, 36(%3)\n"
23291- "12: movl 40(%4), %%eax\n"
23292- "71: movl 44(%4), %%edx\n"
23293+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23294+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23295 " movnti %%eax, 40(%3)\n"
23296 " movnti %%edx, 44(%3)\n"
23297- "13: movl 48(%4), %%eax\n"
23298- "81: movl 52(%4), %%edx\n"
23299+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23300+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23301 " movnti %%eax, 48(%3)\n"
23302 " movnti %%edx, 52(%3)\n"
23303- "14: movl 56(%4), %%eax\n"
23304- "91: movl 60(%4), %%edx\n"
23305+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23306+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23307 " movnti %%eax, 56(%3)\n"
23308 " movnti %%edx, 60(%3)\n"
23309 " addl $-64, %0\n"
23310@@ -497,9 +616,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23311 " shrl $2, %0\n"
23312 " andl $3, %%eax\n"
23313 " cld\n"
23314- "6: rep; movsl\n"
23315+ "6: rep; "__copyuser_seg" movsl\n"
23316 " movl %%eax,%0\n"
23317- "7: rep; movsb\n"
23318+ "7: rep; "__copyuser_seg" movsb\n"
23319 "8:\n"
23320 ".section .fixup,\"ax\"\n"
23321 "9: lea 0(%%eax,%0,4),%0\n"
23322@@ -542,32 +661,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23323 */
23324 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23325 unsigned long size);
23326-unsigned long __copy_user_intel(void __user *to, const void *from,
23327+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23328+ unsigned long size);
23329+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23330 unsigned long size);
23331 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23332 const void __user *from, unsigned long size);
23333 #endif /* CONFIG_X86_INTEL_USERCOPY */
23334
23335 /* Generic arbitrary sized copy. */
23336-#define __copy_user(to, from, size) \
23337+#define __copy_user(to, from, size, prefix, set, restore) \
23338 do { \
23339 int __d0, __d1, __d2; \
23340 __asm__ __volatile__( \
23341+ set \
23342 " cmp $7,%0\n" \
23343 " jbe 1f\n" \
23344 " movl %1,%0\n" \
23345 " negl %0\n" \
23346 " andl $7,%0\n" \
23347 " subl %0,%3\n" \
23348- "4: rep; movsb\n" \
23349+ "4: rep; "prefix"movsb\n" \
23350 " movl %3,%0\n" \
23351 " shrl $2,%0\n" \
23352 " andl $3,%3\n" \
23353 " .align 2,0x90\n" \
23354- "0: rep; movsl\n" \
23355+ "0: rep; "prefix"movsl\n" \
23356 " movl %3,%0\n" \
23357- "1: rep; movsb\n" \
23358+ "1: rep; "prefix"movsb\n" \
23359 "2:\n" \
23360+ restore \
23361 ".section .fixup,\"ax\"\n" \
23362 "5: addl %3,%0\n" \
23363 " jmp 2b\n" \
23364@@ -595,14 +718,14 @@ do { \
23365 " negl %0\n" \
23366 " andl $7,%0\n" \
23367 " subl %0,%3\n" \
23368- "4: rep; movsb\n" \
23369+ "4: rep; "__copyuser_seg"movsb\n" \
23370 " movl %3,%0\n" \
23371 " shrl $2,%0\n" \
23372 " andl $3,%3\n" \
23373 " .align 2,0x90\n" \
23374- "0: rep; movsl\n" \
23375+ "0: rep; "__copyuser_seg"movsl\n" \
23376 " movl %3,%0\n" \
23377- "1: rep; movsb\n" \
23378+ "1: rep; "__copyuser_seg"movsb\n" \
23379 "2:\n" \
23380 ".section .fixup,\"ax\"\n" \
23381 "5: addl %3,%0\n" \
23382@@ -688,9 +811,9 @@ survive:
23383 }
23384 #endif
23385 if (movsl_is_ok(to, from, n))
23386- __copy_user(to, from, n);
23387+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23388 else
23389- n = __copy_user_intel(to, from, n);
23390+ n = __generic_copy_to_user_intel(to, from, n);
23391 return n;
23392 }
23393 EXPORT_SYMBOL(__copy_to_user_ll);
23394@@ -710,10 +833,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23395 unsigned long n)
23396 {
23397 if (movsl_is_ok(to, from, n))
23398- __copy_user(to, from, n);
23399+ __copy_user(to, from, n, __copyuser_seg, "", "");
23400 else
23401- n = __copy_user_intel((void __user *)to,
23402- (const void *)from, n);
23403+ n = __generic_copy_from_user_intel(to, from, n);
23404 return n;
23405 }
23406 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23407@@ -740,65 +862,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23408 if (n > 64 && cpu_has_xmm2)
23409 n = __copy_user_intel_nocache(to, from, n);
23410 else
23411- __copy_user(to, from, n);
23412+ __copy_user(to, from, n, __copyuser_seg, "", "");
23413 #else
23414- __copy_user(to, from, n);
23415+ __copy_user(to, from, n, __copyuser_seg, "", "");
23416 #endif
23417 return n;
23418 }
23419 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23420
23421-/**
23422- * copy_to_user: - Copy a block of data into user space.
23423- * @to: Destination address, in user space.
23424- * @from: Source address, in kernel space.
23425- * @n: Number of bytes to copy.
23426- *
23427- * Context: User context only. This function may sleep.
23428- *
23429- * Copy data from kernel space to user space.
23430- *
23431- * Returns number of bytes that could not be copied.
23432- * On success, this will be zero.
23433- */
23434-unsigned long
23435-copy_to_user(void __user *to, const void *from, unsigned long n)
23436-{
23437- if (access_ok(VERIFY_WRITE, to, n))
23438- n = __copy_to_user(to, from, n);
23439- return n;
23440-}
23441-EXPORT_SYMBOL(copy_to_user);
23442-
23443-/**
23444- * copy_from_user: - Copy a block of data from user space.
23445- * @to: Destination address, in kernel space.
23446- * @from: Source address, in user space.
23447- * @n: Number of bytes to copy.
23448- *
23449- * Context: User context only. This function may sleep.
23450- *
23451- * Copy data from user space to kernel space.
23452- *
23453- * Returns number of bytes that could not be copied.
23454- * On success, this will be zero.
23455- *
23456- * If some data could not be copied, this function will pad the copied
23457- * data to the requested size using zero bytes.
23458- */
23459-unsigned long
23460-_copy_from_user(void *to, const void __user *from, unsigned long n)
23461-{
23462- if (access_ok(VERIFY_READ, from, n))
23463- n = __copy_from_user(to, from, n);
23464- else
23465- memset(to, 0, n);
23466- return n;
23467-}
23468-EXPORT_SYMBOL(_copy_from_user);
23469-
23470 void copy_from_user_overflow(void)
23471 {
23472 WARN(1, "Buffer overflow detected!\n");
23473 }
23474 EXPORT_SYMBOL(copy_from_user_overflow);
23475+
23476+void copy_to_user_overflow(void)
23477+{
23478+ WARN(1, "Buffer overflow detected!\n");
23479+}
23480+EXPORT_SYMBOL(copy_to_user_overflow);
23481+
23482+#ifdef CONFIG_PAX_MEMORY_UDEREF
23483+void __set_fs(mm_segment_t x)
23484+{
23485+ switch (x.seg) {
23486+ case 0:
23487+ loadsegment(gs, 0);
23488+ break;
23489+ case TASK_SIZE_MAX:
23490+ loadsegment(gs, __USER_DS);
23491+ break;
23492+ case -1UL:
23493+ loadsegment(gs, __KERNEL_DS);
23494+ break;
23495+ default:
23496+ BUG();
23497+ }
23498+ return;
23499+}
23500+EXPORT_SYMBOL(__set_fs);
23501+
23502+void set_fs(mm_segment_t x)
23503+{
23504+ current_thread_info()->addr_limit = x;
23505+ __set_fs(x);
23506+}
23507+EXPORT_SYMBOL(set_fs);
23508+#endif
23509diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23510index 0d0326f..6a6155b 100644
23511--- a/arch/x86/lib/usercopy_64.c
23512+++ b/arch/x86/lib/usercopy_64.c
23513@@ -16,6 +16,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23514 {
23515 long __d0;
23516 might_fault();
23517+
23518+#ifdef CONFIG_PAX_MEMORY_UDEREF
23519+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23520+ addr += PAX_USER_SHADOW_BASE;
23521+#endif
23522+
23523 /* no memory constraint because it doesn't change any memory gcc knows
23524 about */
23525 asm volatile(
23526@@ -100,12 +106,20 @@ long strlen_user(const char __user *s)
23527 }
23528 EXPORT_SYMBOL(strlen_user);
23529
23530-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23531+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23532 {
23533- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23534- return copy_user_generic((__force void *)to, (__force void *)from, len);
23535- }
23536- return len;
23537+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23538+
23539+#ifdef CONFIG_PAX_MEMORY_UDEREF
23540+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23541+ to += PAX_USER_SHADOW_BASE;
23542+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23543+ from += PAX_USER_SHADOW_BASE;
23544+#endif
23545+
23546+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23547+ }
23548+ return len;
23549 }
23550 EXPORT_SYMBOL(copy_in_user);
23551
23552@@ -115,7 +129,7 @@ EXPORT_SYMBOL(copy_in_user);
23553 * it is not necessary to optimize tail handling.
23554 */
23555 unsigned long
23556-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23557+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23558 {
23559 char c;
23560 unsigned zero_len;
23561@@ -132,3 +146,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23562 break;
23563 return len;
23564 }
23565+
23566+void copy_from_user_overflow(void)
23567+{
23568+ WARN(1, "Buffer overflow detected!\n");
23569+}
23570+EXPORT_SYMBOL(copy_from_user_overflow);
23571+
23572+void copy_to_user_overflow(void)
23573+{
23574+ WARN(1, "Buffer overflow detected!\n");
23575+}
23576+EXPORT_SYMBOL(copy_to_user_overflow);
23577diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23578index 1fb85db..8b3540b 100644
23579--- a/arch/x86/mm/extable.c
23580+++ b/arch/x86/mm/extable.c
23581@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
23582 const struct exception_table_entry *fixup;
23583
23584 #ifdef CONFIG_PNPBIOS
23585- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23586+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23587 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23588 extern u32 pnp_bios_is_utter_crap;
23589 pnp_bios_is_utter_crap = 1;
23590diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23591index 3ecfd1a..304d554 100644
23592--- a/arch/x86/mm/fault.c
23593+++ b/arch/x86/mm/fault.c
23594@@ -13,11 +13,18 @@
23595 #include <linux/perf_event.h> /* perf_sw_event */
23596 #include <linux/hugetlb.h> /* hstate_index_to_shift */
23597 #include <linux/prefetch.h> /* prefetchw */
23598+#include <linux/unistd.h>
23599+#include <linux/compiler.h>
23600
23601 #include <asm/traps.h> /* dotraplinkage, ... */
23602 #include <asm/pgalloc.h> /* pgd_*(), ... */
23603 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23604 #include <asm/fixmap.h> /* VSYSCALL_START */
23605+#include <asm/tlbflush.h>
23606+
23607+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23608+#include <asm/stacktrace.h>
23609+#endif
23610
23611 /*
23612 * Page fault error code bits:
23613@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
23614 int ret = 0;
23615
23616 /* kprobe_running() needs smp_processor_id() */
23617- if (kprobes_built_in() && !user_mode_vm(regs)) {
23618+ if (kprobes_built_in() && !user_mode(regs)) {
23619 preempt_disable();
23620 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23621 ret = 1;
23622@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23623 return !instr_lo || (instr_lo>>1) == 1;
23624 case 0x00:
23625 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23626- if (probe_kernel_address(instr, opcode))
23627+ if (user_mode(regs)) {
23628+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23629+ return 0;
23630+ } else if (probe_kernel_address(instr, opcode))
23631 return 0;
23632
23633 *prefetch = (instr_lo == 0xF) &&
23634@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23635 while (instr < max_instr) {
23636 unsigned char opcode;
23637
23638- if (probe_kernel_address(instr, opcode))
23639+ if (user_mode(regs)) {
23640+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23641+ break;
23642+ } else if (probe_kernel_address(instr, opcode))
23643 break;
23644
23645 instr++;
23646@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23647 force_sig_info(si_signo, &info, tsk);
23648 }
23649
23650+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23651+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23652+#endif
23653+
23654+#ifdef CONFIG_PAX_EMUTRAMP
23655+static int pax_handle_fetch_fault(struct pt_regs *regs);
23656+#endif
23657+
23658+#ifdef CONFIG_PAX_PAGEEXEC
23659+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23660+{
23661+ pgd_t *pgd;
23662+ pud_t *pud;
23663+ pmd_t *pmd;
23664+
23665+ pgd = pgd_offset(mm, address);
23666+ if (!pgd_present(*pgd))
23667+ return NULL;
23668+ pud = pud_offset(pgd, address);
23669+ if (!pud_present(*pud))
23670+ return NULL;
23671+ pmd = pmd_offset(pud, address);
23672+ if (!pmd_present(*pmd))
23673+ return NULL;
23674+ return pmd;
23675+}
23676+#endif
23677+
23678 DEFINE_SPINLOCK(pgd_lock);
23679 LIST_HEAD(pgd_list);
23680
23681@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
23682 for (address = VMALLOC_START & PMD_MASK;
23683 address >= TASK_SIZE && address < FIXADDR_TOP;
23684 address += PMD_SIZE) {
23685+
23686+#ifdef CONFIG_PAX_PER_CPU_PGD
23687+ unsigned long cpu;
23688+#else
23689 struct page *page;
23690+#endif
23691
23692 spin_lock(&pgd_lock);
23693+
23694+#ifdef CONFIG_PAX_PER_CPU_PGD
23695+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23696+ pgd_t *pgd = get_cpu_pgd(cpu);
23697+ pmd_t *ret;
23698+#else
23699 list_for_each_entry(page, &pgd_list, lru) {
23700+ pgd_t *pgd = page_address(page);
23701 spinlock_t *pgt_lock;
23702 pmd_t *ret;
23703
23704@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
23705 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23706
23707 spin_lock(pgt_lock);
23708- ret = vmalloc_sync_one(page_address(page), address);
23709+#endif
23710+
23711+ ret = vmalloc_sync_one(pgd, address);
23712+
23713+#ifndef CONFIG_PAX_PER_CPU_PGD
23714 spin_unlock(pgt_lock);
23715+#endif
23716
23717 if (!ret)
23718 break;
23719@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23720 * an interrupt in the middle of a task switch..
23721 */
23722 pgd_paddr = read_cr3();
23723+
23724+#ifdef CONFIG_PAX_PER_CPU_PGD
23725+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23726+#endif
23727+
23728 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23729 if (!pmd_k)
23730 return -1;
23731@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23732 * happen within a race in page table update. In the later
23733 * case just flush:
23734 */
23735+
23736+#ifdef CONFIG_PAX_PER_CPU_PGD
23737+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23738+ pgd = pgd_offset_cpu(smp_processor_id(), address);
23739+#else
23740 pgd = pgd_offset(current->active_mm, address);
23741+#endif
23742+
23743 pgd_ref = pgd_offset_k(address);
23744 if (pgd_none(*pgd_ref))
23745 return -1;
23746@@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23747 static int is_errata100(struct pt_regs *regs, unsigned long address)
23748 {
23749 #ifdef CONFIG_X86_64
23750- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23751+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23752 return 1;
23753 #endif
23754 return 0;
23755@@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23756 }
23757
23758 static const char nx_warning[] = KERN_CRIT
23759-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23760+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23761
23762 static void
23763 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23764@@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23765 if (!oops_may_print())
23766 return;
23767
23768- if (error_code & PF_INSTR) {
23769+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
23770 unsigned int level;
23771
23772 pte_t *pte = lookup_address(address, &level);
23773
23774 if (pte && pte_present(*pte) && !pte_exec(*pte))
23775- printk(nx_warning, current_uid());
23776+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
23777 }
23778
23779+#ifdef CONFIG_PAX_KERNEXEC
23780+ if (init_mm.start_code <= address && address < init_mm.end_code) {
23781+ if (current->signal->curr_ip)
23782+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23783+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23784+ else
23785+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23786+ current->comm, task_pid_nr(current), current_uid(), current_euid());
23787+ }
23788+#endif
23789+
23790 printk(KERN_ALERT "BUG: unable to handle kernel ");
23791 if (address < PAGE_SIZE)
23792 printk(KERN_CONT "NULL pointer dereference");
23793@@ -748,6 +829,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23794 }
23795 #endif
23796
23797+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23798+ if (pax_is_fetch_fault(regs, error_code, address)) {
23799+
23800+#ifdef CONFIG_PAX_EMUTRAMP
23801+ switch (pax_handle_fetch_fault(regs)) {
23802+ case 2:
23803+ return;
23804+ }
23805+#endif
23806+
23807+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23808+ do_group_exit(SIGKILL);
23809+ }
23810+#endif
23811+
23812 if (unlikely(show_unhandled_signals))
23813 show_signal_msg(regs, error_code, address, tsk);
23814
23815@@ -844,7 +940,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
23816 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
23817 printk(KERN_ERR
23818 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23819- tsk->comm, tsk->pid, address);
23820+ tsk->comm, task_pid_nr(tsk), address);
23821 code = BUS_MCEERR_AR;
23822 }
23823 #endif
23824@@ -900,6 +996,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
23825 return 1;
23826 }
23827
23828+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23829+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23830+{
23831+ pte_t *pte;
23832+ pmd_t *pmd;
23833+ spinlock_t *ptl;
23834+ unsigned char pte_mask;
23835+
23836+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
23837+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
23838+ return 0;
23839+
23840+ /* PaX: it's our fault, let's handle it if we can */
23841+
23842+ /* PaX: take a look at read faults before acquiring any locks */
23843+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23844+ /* instruction fetch attempt from a protected page in user mode */
23845+ up_read(&mm->mmap_sem);
23846+
23847+#ifdef CONFIG_PAX_EMUTRAMP
23848+ switch (pax_handle_fetch_fault(regs)) {
23849+ case 2:
23850+ return 1;
23851+ }
23852+#endif
23853+
23854+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23855+ do_group_exit(SIGKILL);
23856+ }
23857+
23858+ pmd = pax_get_pmd(mm, address);
23859+ if (unlikely(!pmd))
23860+ return 0;
23861+
23862+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23863+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23864+ pte_unmap_unlock(pte, ptl);
23865+ return 0;
23866+ }
23867+
23868+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23869+ /* write attempt to a protected page in user mode */
23870+ pte_unmap_unlock(pte, ptl);
23871+ return 0;
23872+ }
23873+
23874+#ifdef CONFIG_SMP
23875+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
23876+#else
23877+ if (likely(address > get_limit(regs->cs)))
23878+#endif
23879+ {
23880+ set_pte(pte, pte_mkread(*pte));
23881+ __flush_tlb_one(address);
23882+ pte_unmap_unlock(pte, ptl);
23883+ up_read(&mm->mmap_sem);
23884+ return 1;
23885+ }
23886+
23887+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
23888+
23889+ /*
23890+ * PaX: fill DTLB with user rights and retry
23891+ */
23892+ __asm__ __volatile__ (
23893+ "orb %2,(%1)\n"
23894+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
23895+/*
23896+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
23897+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
23898+ * page fault when examined during a TLB load attempt. this is true not only
23899+ * for PTEs holding a non-present entry but also present entries that will
23900+ * raise a page fault (such as those set up by PaX, or the copy-on-write
23901+ * mechanism). in effect it means that we do *not* need to flush the TLBs
23902+ * for our target pages since their PTEs are simply not in the TLBs at all.
23903+
23904+ * the best thing in omitting it is that we gain around 15-20% speed in the
23905+ * fast path of the page fault handler and can get rid of tracing since we
23906+ * can no longer flush unintended entries.
23907+ */
23908+ "invlpg (%0)\n"
23909+#endif
23910+ __copyuser_seg"testb $0,(%0)\n"
23911+ "xorb %3,(%1)\n"
23912+ :
23913+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
23914+ : "memory", "cc");
23915+ pte_unmap_unlock(pte, ptl);
23916+ up_read(&mm->mmap_sem);
23917+ return 1;
23918+}
23919+#endif
23920+
23921 /*
23922 * Handle a spurious fault caused by a stale TLB entry.
23923 *
23924@@ -972,6 +1161,9 @@ int show_unhandled_signals = 1;
23925 static inline int
23926 access_error(unsigned long error_code, struct vm_area_struct *vma)
23927 {
23928+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
23929+ return 1;
23930+
23931 if (error_code & PF_WRITE) {
23932 /* write, present and write, not present: */
23933 if (unlikely(!(vma->vm_flags & VM_WRITE)))
23934@@ -1005,18 +1197,33 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23935 {
23936 struct vm_area_struct *vma;
23937 struct task_struct *tsk;
23938- unsigned long address;
23939 struct mm_struct *mm;
23940 int fault;
23941 int write = error_code & PF_WRITE;
23942 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
23943 (write ? FAULT_FLAG_WRITE : 0);
23944
23945- tsk = current;
23946- mm = tsk->mm;
23947-
23948 /* Get the faulting address: */
23949- address = read_cr2();
23950+ unsigned long address = read_cr2();
23951+
23952+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23953+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
23954+ if (!search_exception_tables(regs->ip)) {
23955+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23956+ bad_area_nosemaphore(regs, error_code, address);
23957+ return;
23958+ }
23959+ if (address < PAX_USER_SHADOW_BASE) {
23960+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23961+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
23962+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
23963+ } else
23964+ address -= PAX_USER_SHADOW_BASE;
23965+ }
23966+#endif
23967+
23968+ tsk = current;
23969+ mm = tsk->mm;
23970
23971 /*
23972 * Detect and handle instructions that would cause a page fault for
23973@@ -1077,7 +1284,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23974 * User-mode registers count as a user access even for any
23975 * potential system fault or CPU buglet:
23976 */
23977- if (user_mode_vm(regs)) {
23978+ if (user_mode(regs)) {
23979 local_irq_enable();
23980 error_code |= PF_USER;
23981 } else {
23982@@ -1132,6 +1339,11 @@ retry:
23983 might_sleep();
23984 }
23985
23986+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23987+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
23988+ return;
23989+#endif
23990+
23991 vma = find_vma(mm, address);
23992 if (unlikely(!vma)) {
23993 bad_area(regs, error_code, address);
23994@@ -1143,18 +1355,24 @@ retry:
23995 bad_area(regs, error_code, address);
23996 return;
23997 }
23998- if (error_code & PF_USER) {
23999- /*
24000- * Accessing the stack below %sp is always a bug.
24001- * The large cushion allows instructions like enter
24002- * and pusha to work. ("enter $65535, $31" pushes
24003- * 32 pointers and then decrements %sp by 65535.)
24004- */
24005- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24006- bad_area(regs, error_code, address);
24007- return;
24008- }
24009+ /*
24010+ * Accessing the stack below %sp is always a bug.
24011+ * The large cushion allows instructions like enter
24012+ * and pusha to work. ("enter $65535, $31" pushes
24013+ * 32 pointers and then decrements %sp by 65535.)
24014+ */
24015+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24016+ bad_area(regs, error_code, address);
24017+ return;
24018 }
24019+
24020+#ifdef CONFIG_PAX_SEGMEXEC
24021+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24022+ bad_area(regs, error_code, address);
24023+ return;
24024+ }
24025+#endif
24026+
24027 if (unlikely(expand_stack(vma, address))) {
24028 bad_area(regs, error_code, address);
24029 return;
24030@@ -1209,3 +1427,292 @@ good_area:
24031
24032 up_read(&mm->mmap_sem);
24033 }
24034+
24035+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24036+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24037+{
24038+ struct mm_struct *mm = current->mm;
24039+ unsigned long ip = regs->ip;
24040+
24041+ if (v8086_mode(regs))
24042+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24043+
24044+#ifdef CONFIG_PAX_PAGEEXEC
24045+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24046+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24047+ return true;
24048+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24049+ return true;
24050+ return false;
24051+ }
24052+#endif
24053+
24054+#ifdef CONFIG_PAX_SEGMEXEC
24055+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24056+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24057+ return true;
24058+ return false;
24059+ }
24060+#endif
24061+
24062+ return false;
24063+}
24064+#endif
24065+
24066+#ifdef CONFIG_PAX_EMUTRAMP
24067+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24068+{
24069+ int err;
24070+
24071+ do { /* PaX: libffi trampoline emulation */
24072+ unsigned char mov, jmp;
24073+ unsigned int addr1, addr2;
24074+
24075+#ifdef CONFIG_X86_64
24076+ if ((regs->ip + 9) >> 32)
24077+ break;
24078+#endif
24079+
24080+ err = get_user(mov, (unsigned char __user *)regs->ip);
24081+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24082+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24083+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24084+
24085+ if (err)
24086+ break;
24087+
24088+ if (mov == 0xB8 && jmp == 0xE9) {
24089+ regs->ax = addr1;
24090+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24091+ return 2;
24092+ }
24093+ } while (0);
24094+
24095+ do { /* PaX: gcc trampoline emulation #1 */
24096+ unsigned char mov1, mov2;
24097+ unsigned short jmp;
24098+ unsigned int addr1, addr2;
24099+
24100+#ifdef CONFIG_X86_64
24101+ if ((regs->ip + 11) >> 32)
24102+ break;
24103+#endif
24104+
24105+ err = get_user(mov1, (unsigned char __user *)regs->ip);
24106+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24107+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24108+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24109+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24110+
24111+ if (err)
24112+ break;
24113+
24114+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24115+ regs->cx = addr1;
24116+ regs->ax = addr2;
24117+ regs->ip = addr2;
24118+ return 2;
24119+ }
24120+ } while (0);
24121+
24122+ do { /* PaX: gcc trampoline emulation #2 */
24123+ unsigned char mov, jmp;
24124+ unsigned int addr1, addr2;
24125+
24126+#ifdef CONFIG_X86_64
24127+ if ((regs->ip + 9) >> 32)
24128+ break;
24129+#endif
24130+
24131+ err = get_user(mov, (unsigned char __user *)regs->ip);
24132+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24133+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24134+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24135+
24136+ if (err)
24137+ break;
24138+
24139+ if (mov == 0xB9 && jmp == 0xE9) {
24140+ regs->cx = addr1;
24141+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24142+ return 2;
24143+ }
24144+ } while (0);
24145+
24146+ return 1; /* PaX in action */
24147+}
24148+
24149+#ifdef CONFIG_X86_64
24150+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24151+{
24152+ int err;
24153+
24154+ do { /* PaX: libffi trampoline emulation */
24155+ unsigned short mov1, mov2, jmp1;
24156+ unsigned char stcclc, jmp2;
24157+ unsigned long addr1, addr2;
24158+
24159+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24160+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24161+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24162+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24163+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24164+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24165+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24166+
24167+ if (err)
24168+ break;
24169+
24170+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24171+ regs->r11 = addr1;
24172+ regs->r10 = addr2;
24173+ if (stcclc == 0xF8)
24174+ regs->flags &= ~X86_EFLAGS_CF;
24175+ else
24176+ regs->flags |= X86_EFLAGS_CF;
24177+ regs->ip = addr1;
24178+ return 2;
24179+ }
24180+ } while (0);
24181+
24182+ do { /* PaX: gcc trampoline emulation #1 */
24183+ unsigned short mov1, mov2, jmp1;
24184+ unsigned char jmp2;
24185+ unsigned int addr1;
24186+ unsigned long addr2;
24187+
24188+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24189+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24190+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24191+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24192+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24193+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24194+
24195+ if (err)
24196+ break;
24197+
24198+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24199+ regs->r11 = addr1;
24200+ regs->r10 = addr2;
24201+ regs->ip = addr1;
24202+ return 2;
24203+ }
24204+ } while (0);
24205+
24206+ do { /* PaX: gcc trampoline emulation #2 */
24207+ unsigned short mov1, mov2, jmp1;
24208+ unsigned char jmp2;
24209+ unsigned long addr1, addr2;
24210+
24211+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24212+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24213+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24214+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24215+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24216+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24217+
24218+ if (err)
24219+ break;
24220+
24221+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24222+ regs->r11 = addr1;
24223+ regs->r10 = addr2;
24224+ regs->ip = addr1;
24225+ return 2;
24226+ }
24227+ } while (0);
24228+
24229+ return 1; /* PaX in action */
24230+}
24231+#endif
24232+
24233+/*
24234+ * PaX: decide what to do with offenders (regs->ip = fault address)
24235+ *
24236+ * returns 1 when task should be killed
24237+ * 2 when gcc trampoline was detected
24238+ */
24239+static int pax_handle_fetch_fault(struct pt_regs *regs)
24240+{
24241+ if (v8086_mode(regs))
24242+ return 1;
24243+
24244+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24245+ return 1;
24246+
24247+#ifdef CONFIG_X86_32
24248+ return pax_handle_fetch_fault_32(regs);
24249+#else
24250+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24251+ return pax_handle_fetch_fault_32(regs);
24252+ else
24253+ return pax_handle_fetch_fault_64(regs);
24254+#endif
24255+}
24256+#endif
24257+
24258+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24259+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24260+{
24261+ long i;
24262+
24263+ printk(KERN_ERR "PAX: bytes at PC: ");
24264+ for (i = 0; i < 20; i++) {
24265+ unsigned char c;
24266+ if (get_user(c, (unsigned char __force_user *)pc+i))
24267+ printk(KERN_CONT "?? ");
24268+ else
24269+ printk(KERN_CONT "%02x ", c);
24270+ }
24271+ printk("\n");
24272+
24273+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24274+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
24275+ unsigned long c;
24276+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
24277+#ifdef CONFIG_X86_32
24278+ printk(KERN_CONT "???????? ");
24279+#else
24280+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24281+ printk(KERN_CONT "???????? ???????? ");
24282+ else
24283+ printk(KERN_CONT "???????????????? ");
24284+#endif
24285+ } else {
24286+#ifdef CONFIG_X86_64
24287+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24288+ printk(KERN_CONT "%08x ", (unsigned int)c);
24289+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24290+ } else
24291+#endif
24292+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24293+ }
24294+ }
24295+ printk("\n");
24296+}
24297+#endif
24298+
24299+/**
24300+ * probe_kernel_write(): safely attempt to write to a location
24301+ * @dst: address to write to
24302+ * @src: pointer to the data that shall be written
24303+ * @size: size of the data chunk
24304+ *
24305+ * Safely write to address @dst from the buffer at @src. If a kernel fault
24306+ * happens, handle that and return -EFAULT.
24307+ */
24308+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24309+{
24310+ long ret;
24311+ mm_segment_t old_fs = get_fs();
24312+
24313+ set_fs(KERNEL_DS);
24314+ pagefault_disable();
24315+ pax_open_kernel();
24316+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24317+ pax_close_kernel();
24318+ pagefault_enable();
24319+ set_fs(old_fs);
24320+
24321+ return ret ? -EFAULT : 0;
24322+}
24323diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24324index dd74e46..7d26398 100644
24325--- a/arch/x86/mm/gup.c
24326+++ b/arch/x86/mm/gup.c
24327@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24328 addr = start;
24329 len = (unsigned long) nr_pages << PAGE_SHIFT;
24330 end = start + len;
24331- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24332+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24333 (void __user *)start, len)))
24334 return 0;
24335
24336diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24337index 6f31ee5..8ee4164 100644
24338--- a/arch/x86/mm/highmem_32.c
24339+++ b/arch/x86/mm/highmem_32.c
24340@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
24341 idx = type + KM_TYPE_NR*smp_processor_id();
24342 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24343 BUG_ON(!pte_none(*(kmap_pte-idx)));
24344+
24345+ pax_open_kernel();
24346 set_pte(kmap_pte-idx, mk_pte(page, prot));
24347+ pax_close_kernel();
24348+
24349 arch_flush_lazy_mmu_mode();
24350
24351 return (void *)vaddr;
24352diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24353index f6679a7..8f795a3 100644
24354--- a/arch/x86/mm/hugetlbpage.c
24355+++ b/arch/x86/mm/hugetlbpage.c
24356@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24357 struct hstate *h = hstate_file(file);
24358 struct mm_struct *mm = current->mm;
24359 struct vm_area_struct *vma;
24360- unsigned long start_addr;
24361+ unsigned long start_addr, pax_task_size = TASK_SIZE;
24362+
24363+#ifdef CONFIG_PAX_SEGMEXEC
24364+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24365+ pax_task_size = SEGMEXEC_TASK_SIZE;
24366+#endif
24367+
24368+ pax_task_size -= PAGE_SIZE;
24369
24370 if (len > mm->cached_hole_size) {
24371- start_addr = mm->free_area_cache;
24372+ start_addr = mm->free_area_cache;
24373 } else {
24374- start_addr = TASK_UNMAPPED_BASE;
24375- mm->cached_hole_size = 0;
24376+ start_addr = mm->mmap_base;
24377+ mm->cached_hole_size = 0;
24378 }
24379
24380 full_search:
24381@@ -280,26 +287,27 @@ full_search:
24382
24383 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24384 /* At this point: (!vma || addr < vma->vm_end). */
24385- if (TASK_SIZE - len < addr) {
24386+ if (pax_task_size - len < addr) {
24387 /*
24388 * Start a new search - just in case we missed
24389 * some holes.
24390 */
24391- if (start_addr != TASK_UNMAPPED_BASE) {
24392- start_addr = TASK_UNMAPPED_BASE;
24393+ if (start_addr != mm->mmap_base) {
24394+ start_addr = mm->mmap_base;
24395 mm->cached_hole_size = 0;
24396 goto full_search;
24397 }
24398 return -ENOMEM;
24399 }
24400- if (!vma || addr + len <= vma->vm_start) {
24401- mm->free_area_cache = addr + len;
24402- return addr;
24403- }
24404+ if (check_heap_stack_gap(vma, addr, len))
24405+ break;
24406 if (addr + mm->cached_hole_size < vma->vm_start)
24407 mm->cached_hole_size = vma->vm_start - addr;
24408 addr = ALIGN(vma->vm_end, huge_page_size(h));
24409 }
24410+
24411+ mm->free_area_cache = addr + len;
24412+ return addr;
24413 }
24414
24415 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24416@@ -310,9 +318,8 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24417 struct mm_struct *mm = current->mm;
24418 struct vm_area_struct *vma;
24419 unsigned long base = mm->mmap_base;
24420- unsigned long addr = addr0;
24421+ unsigned long addr;
24422 unsigned long largest_hole = mm->cached_hole_size;
24423- unsigned long start_addr;
24424
24425 /* don't allow allocations above current base */
24426 if (mm->free_area_cache > base)
24427@@ -322,16 +329,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24428 largest_hole = 0;
24429 mm->free_area_cache = base;
24430 }
24431-try_again:
24432- start_addr = mm->free_area_cache;
24433
24434 /* make sure it can fit in the remaining address space */
24435 if (mm->free_area_cache < len)
24436 goto fail;
24437
24438 /* either no address requested or can't fit in requested address hole */
24439- addr = (mm->free_area_cache - len) & huge_page_mask(h);
24440+ addr = mm->free_area_cache - len;
24441 do {
24442+ addr &= huge_page_mask(h);
24443 /*
24444 * Lookup failure means no vma is above this address,
24445 * i.e. return with success:
24446@@ -340,10 +346,10 @@ try_again:
24447 if (!vma)
24448 return addr;
24449
24450- if (addr + len <= vma->vm_start) {
24451+ if (check_heap_stack_gap(vma, addr, len)) {
24452 /* remember the address as a hint for next time */
24453- mm->cached_hole_size = largest_hole;
24454- return (mm->free_area_cache = addr);
24455+ mm->cached_hole_size = largest_hole;
24456+ return (mm->free_area_cache = addr);
24457 } else if (mm->free_area_cache == vma->vm_end) {
24458 /* pull free_area_cache down to the first hole */
24459 mm->free_area_cache = vma->vm_start;
24460@@ -352,29 +358,34 @@ try_again:
24461
24462 /* remember the largest hole we saw so far */
24463 if (addr + largest_hole < vma->vm_start)
24464- largest_hole = vma->vm_start - addr;
24465+ largest_hole = vma->vm_start - addr;
24466
24467 /* try just below the current vma->vm_start */
24468- addr = (vma->vm_start - len) & huge_page_mask(h);
24469- } while (len <= vma->vm_start);
24470+ addr = skip_heap_stack_gap(vma, len);
24471+ } while (!IS_ERR_VALUE(addr));
24472
24473 fail:
24474 /*
24475- * if hint left us with no space for the requested
24476- * mapping then try again:
24477- */
24478- if (start_addr != base) {
24479- mm->free_area_cache = base;
24480- largest_hole = 0;
24481- goto try_again;
24482- }
24483- /*
24484 * A failed mmap() very likely causes application failure,
24485 * so fall back to the bottom-up function here. This scenario
24486 * can happen with large stack limits and large mmap()
24487 * allocations.
24488 */
24489- mm->free_area_cache = TASK_UNMAPPED_BASE;
24490+
24491+#ifdef CONFIG_PAX_SEGMEXEC
24492+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24493+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24494+ else
24495+#endif
24496+
24497+ mm->mmap_base = TASK_UNMAPPED_BASE;
24498+
24499+#ifdef CONFIG_PAX_RANDMMAP
24500+ if (mm->pax_flags & MF_PAX_RANDMMAP)
24501+ mm->mmap_base += mm->delta_mmap;
24502+#endif
24503+
24504+ mm->free_area_cache = mm->mmap_base;
24505 mm->cached_hole_size = ~0UL;
24506 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24507 len, pgoff, flags);
24508@@ -382,6 +393,7 @@ fail:
24509 /*
24510 * Restore the topdown base:
24511 */
24512+ mm->mmap_base = base;
24513 mm->free_area_cache = base;
24514 mm->cached_hole_size = ~0UL;
24515
24516@@ -395,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24517 struct hstate *h = hstate_file(file);
24518 struct mm_struct *mm = current->mm;
24519 struct vm_area_struct *vma;
24520+ unsigned long pax_task_size = TASK_SIZE;
24521
24522 if (len & ~huge_page_mask(h))
24523 return -EINVAL;
24524- if (len > TASK_SIZE)
24525+
24526+#ifdef CONFIG_PAX_SEGMEXEC
24527+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24528+ pax_task_size = SEGMEXEC_TASK_SIZE;
24529+#endif
24530+
24531+ pax_task_size -= PAGE_SIZE;
24532+
24533+ if (len > pax_task_size)
24534 return -ENOMEM;
24535
24536 if (flags & MAP_FIXED) {
24537@@ -410,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24538 if (addr) {
24539 addr = ALIGN(addr, huge_page_size(h));
24540 vma = find_vma(mm, addr);
24541- if (TASK_SIZE - len >= addr &&
24542- (!vma || addr + len <= vma->vm_start))
24543+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24544 return addr;
24545 }
24546 if (mm->get_unmapped_area == arch_get_unmapped_area)
24547diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24548index 4f0cec7..00976ce 100644
24549--- a/arch/x86/mm/init.c
24550+++ b/arch/x86/mm/init.c
24551@@ -16,6 +16,8 @@
24552 #include <asm/tlb.h>
24553 #include <asm/proto.h>
24554 #include <asm/dma.h> /* for MAX_DMA_PFN */
24555+#include <asm/desc.h>
24556+#include <asm/bios_ebda.h>
24557
24558 unsigned long __initdata pgt_buf_start;
24559 unsigned long __meminitdata pgt_buf_end;
24560@@ -32,7 +34,7 @@ int direct_gbpages
24561 static void __init find_early_table_space(unsigned long end, int use_pse,
24562 int use_gbpages)
24563 {
24564- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
24565+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
24566 phys_addr_t base;
24567
24568 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
24569@@ -311,10 +313,37 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24570 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
24571 * mmio resources as well as potential bios/acpi data regions.
24572 */
24573+
24574+#ifdef CONFIG_GRKERNSEC_KMEM
24575+static unsigned int ebda_start __read_only;
24576+static unsigned int ebda_end __read_only;
24577+#endif
24578+
24579 int devmem_is_allowed(unsigned long pagenr)
24580 {
24581+#ifdef CONFIG_GRKERNSEC_KMEM
24582+ /* allow BDA */
24583+ if (!pagenr)
24584+ return 1;
24585+ /* allow EBDA */
24586+ if (pagenr >= ebda_start && pagenr < ebda_end)
24587+ return 1;
24588+#else
24589+ if (!pagenr)
24590+ return 1;
24591+#ifdef CONFIG_VM86
24592+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
24593+ return 1;
24594+#endif
24595+#endif
24596+
24597+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24598+ return 1;
24599+#ifdef CONFIG_GRKERNSEC_KMEM
24600+ /* throw out everything else below 1MB */
24601 if (pagenr <= 256)
24602- return 1;
24603+ return 0;
24604+#endif
24605 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24606 return 0;
24607 if (!page_is_ram(pagenr))
24608@@ -371,8 +400,116 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24609 #endif
24610 }
24611
24612+#ifdef CONFIG_GRKERNSEC_KMEM
24613+static inline void gr_init_ebda(void)
24614+{
24615+ unsigned int ebda_addr;
24616+ unsigned int ebda_size = 0;
24617+
24618+ ebda_addr = get_bios_ebda();
24619+ if (ebda_addr) {
24620+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
24621+ ebda_size <<= 10;
24622+ }
24623+ if (ebda_addr && ebda_size) {
24624+ ebda_start = ebda_addr >> PAGE_SHIFT;
24625+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
24626+ } else {
24627+ ebda_start = 0x9f000 >> PAGE_SHIFT;
24628+ ebda_end = 0xa0000 >> PAGE_SHIFT;
24629+ }
24630+}
24631+#else
24632+static inline void gr_init_ebda(void) { }
24633+#endif
24634+
24635 void free_initmem(void)
24636 {
24637+#ifdef CONFIG_PAX_KERNEXEC
24638+#ifdef CONFIG_X86_32
24639+ /* PaX: limit KERNEL_CS to actual size */
24640+ unsigned long addr, limit;
24641+ struct desc_struct d;
24642+ int cpu;
24643+#else
24644+ pgd_t *pgd;
24645+ pud_t *pud;
24646+ pmd_t *pmd;
24647+ unsigned long addr, end;
24648+#endif
24649+#endif
24650+
24651+ gr_init_ebda();
24652+
24653+#ifdef CONFIG_PAX_KERNEXEC
24654+#ifdef CONFIG_X86_32
24655+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24656+ limit = (limit - 1UL) >> PAGE_SHIFT;
24657+
24658+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24659+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
24660+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24661+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24662+ }
24663+
24664+ /* PaX: make KERNEL_CS read-only */
24665+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24666+ if (!paravirt_enabled())
24667+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24668+/*
24669+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24670+ pgd = pgd_offset_k(addr);
24671+ pud = pud_offset(pgd, addr);
24672+ pmd = pmd_offset(pud, addr);
24673+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24674+ }
24675+*/
24676+#ifdef CONFIG_X86_PAE
24677+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24678+/*
24679+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24680+ pgd = pgd_offset_k(addr);
24681+ pud = pud_offset(pgd, addr);
24682+ pmd = pmd_offset(pud, addr);
24683+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24684+ }
24685+*/
24686+#endif
24687+
24688+#ifdef CONFIG_MODULES
24689+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24690+#endif
24691+
24692+#else
24693+ /* PaX: make kernel code/rodata read-only, rest non-executable */
24694+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24695+ pgd = pgd_offset_k(addr);
24696+ pud = pud_offset(pgd, addr);
24697+ pmd = pmd_offset(pud, addr);
24698+ if (!pmd_present(*pmd))
24699+ continue;
24700+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24701+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24702+ else
24703+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24704+ }
24705+
24706+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24707+ end = addr + KERNEL_IMAGE_SIZE;
24708+ for (; addr < end; addr += PMD_SIZE) {
24709+ pgd = pgd_offset_k(addr);
24710+ pud = pud_offset(pgd, addr);
24711+ pmd = pmd_offset(pud, addr);
24712+ if (!pmd_present(*pmd))
24713+ continue;
24714+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24715+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24716+ }
24717+#endif
24718+
24719+ flush_tlb_all();
24720+#endif
24721+
24722 free_init_pages("unused kernel memory",
24723 (unsigned long)(&__init_begin),
24724 (unsigned long)(&__init_end));
24725diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24726index 575d86f..4987469 100644
24727--- a/arch/x86/mm/init_32.c
24728+++ b/arch/x86/mm/init_32.c
24729@@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
24730 }
24731
24732 /*
24733- * Creates a middle page table and puts a pointer to it in the
24734- * given global directory entry. This only returns the gd entry
24735- * in non-PAE compilation mode, since the middle layer is folded.
24736- */
24737-static pmd_t * __init one_md_table_init(pgd_t *pgd)
24738-{
24739- pud_t *pud;
24740- pmd_t *pmd_table;
24741-
24742-#ifdef CONFIG_X86_PAE
24743- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24744- if (after_bootmem)
24745- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24746- else
24747- pmd_table = (pmd_t *)alloc_low_page();
24748- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24749- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24750- pud = pud_offset(pgd, 0);
24751- BUG_ON(pmd_table != pmd_offset(pud, 0));
24752-
24753- return pmd_table;
24754- }
24755-#endif
24756- pud = pud_offset(pgd, 0);
24757- pmd_table = pmd_offset(pud, 0);
24758-
24759- return pmd_table;
24760-}
24761-
24762-/*
24763 * Create a page table and place a pointer to it in a middle page
24764 * directory entry:
24765 */
24766@@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24767 page_table = (pte_t *)alloc_low_page();
24768
24769 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24770+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24771+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24772+#else
24773 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24774+#endif
24775 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24776 }
24777
24778 return pte_offset_kernel(pmd, 0);
24779 }
24780
24781+static pmd_t * __init one_md_table_init(pgd_t *pgd)
24782+{
24783+ pud_t *pud;
24784+ pmd_t *pmd_table;
24785+
24786+ pud = pud_offset(pgd, 0);
24787+ pmd_table = pmd_offset(pud, 0);
24788+
24789+ return pmd_table;
24790+}
24791+
24792 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24793 {
24794 int pgd_idx = pgd_index(vaddr);
24795@@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24796 int pgd_idx, pmd_idx;
24797 unsigned long vaddr;
24798 pgd_t *pgd;
24799+ pud_t *pud;
24800 pmd_t *pmd;
24801 pte_t *pte = NULL;
24802
24803@@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24804 pgd = pgd_base + pgd_idx;
24805
24806 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24807- pmd = one_md_table_init(pgd);
24808- pmd = pmd + pmd_index(vaddr);
24809+ pud = pud_offset(pgd, vaddr);
24810+ pmd = pmd_offset(pud, vaddr);
24811+
24812+#ifdef CONFIG_X86_PAE
24813+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24814+#endif
24815+
24816 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24817 pmd++, pmd_idx++) {
24818 pte = page_table_kmap_check(one_page_table_init(pmd),
24819@@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24820 }
24821 }
24822
24823-static inline int is_kernel_text(unsigned long addr)
24824+static inline int is_kernel_text(unsigned long start, unsigned long end)
24825 {
24826- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
24827- return 1;
24828- return 0;
24829+ if ((start > ktla_ktva((unsigned long)_etext) ||
24830+ end <= ktla_ktva((unsigned long)_stext)) &&
24831+ (start > ktla_ktva((unsigned long)_einittext) ||
24832+ end <= ktla_ktva((unsigned long)_sinittext)) &&
24833+
24834+#ifdef CONFIG_ACPI_SLEEP
24835+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24836+#endif
24837+
24838+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24839+ return 0;
24840+ return 1;
24841 }
24842
24843 /*
24844@@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
24845 unsigned long last_map_addr = end;
24846 unsigned long start_pfn, end_pfn;
24847 pgd_t *pgd_base = swapper_pg_dir;
24848- int pgd_idx, pmd_idx, pte_ofs;
24849+ unsigned int pgd_idx, pmd_idx, pte_ofs;
24850 unsigned long pfn;
24851 pgd_t *pgd;
24852+ pud_t *pud;
24853 pmd_t *pmd;
24854 pte_t *pte;
24855 unsigned pages_2m, pages_4k;
24856@@ -280,8 +281,13 @@ repeat:
24857 pfn = start_pfn;
24858 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24859 pgd = pgd_base + pgd_idx;
24860- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
24861- pmd = one_md_table_init(pgd);
24862+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
24863+ pud = pud_offset(pgd, 0);
24864+ pmd = pmd_offset(pud, 0);
24865+
24866+#ifdef CONFIG_X86_PAE
24867+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24868+#endif
24869
24870 if (pfn >= end_pfn)
24871 continue;
24872@@ -293,14 +299,13 @@ repeat:
24873 #endif
24874 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
24875 pmd++, pmd_idx++) {
24876- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
24877+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
24878
24879 /*
24880 * Map with big pages if possible, otherwise
24881 * create normal page tables:
24882 */
24883 if (use_pse) {
24884- unsigned int addr2;
24885 pgprot_t prot = PAGE_KERNEL_LARGE;
24886 /*
24887 * first pass will use the same initial
24888@@ -310,11 +315,7 @@ repeat:
24889 __pgprot(PTE_IDENT_ATTR |
24890 _PAGE_PSE);
24891
24892- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
24893- PAGE_OFFSET + PAGE_SIZE-1;
24894-
24895- if (is_kernel_text(addr) ||
24896- is_kernel_text(addr2))
24897+ if (is_kernel_text(address, address + PMD_SIZE))
24898 prot = PAGE_KERNEL_LARGE_EXEC;
24899
24900 pages_2m++;
24901@@ -331,7 +332,7 @@ repeat:
24902 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24903 pte += pte_ofs;
24904 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
24905- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
24906+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
24907 pgprot_t prot = PAGE_KERNEL;
24908 /*
24909 * first pass will use the same initial
24910@@ -339,7 +340,7 @@ repeat:
24911 */
24912 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
24913
24914- if (is_kernel_text(addr))
24915+ if (is_kernel_text(address, address + PAGE_SIZE))
24916 prot = PAGE_KERNEL_EXEC;
24917
24918 pages_4k++;
24919@@ -465,7 +466,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
24920
24921 pud = pud_offset(pgd, va);
24922 pmd = pmd_offset(pud, va);
24923- if (!pmd_present(*pmd))
24924+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
24925 break;
24926
24927 pte = pte_offset_kernel(pmd, va);
24928@@ -517,12 +518,10 @@ void __init early_ioremap_page_table_range_init(void)
24929
24930 static void __init pagetable_init(void)
24931 {
24932- pgd_t *pgd_base = swapper_pg_dir;
24933-
24934- permanent_kmaps_init(pgd_base);
24935+ permanent_kmaps_init(swapper_pg_dir);
24936 }
24937
24938-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24939+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24940 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24941
24942 /* user-defined highmem size */
24943@@ -734,6 +733,12 @@ void __init mem_init(void)
24944
24945 pci_iommu_alloc();
24946
24947+#ifdef CONFIG_PAX_PER_CPU_PGD
24948+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24949+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24950+ KERNEL_PGD_PTRS);
24951+#endif
24952+
24953 #ifdef CONFIG_FLATMEM
24954 BUG_ON(!mem_map);
24955 #endif
24956@@ -760,7 +765,7 @@ void __init mem_init(void)
24957 reservedpages++;
24958
24959 codesize = (unsigned long) &_etext - (unsigned long) &_text;
24960- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
24961+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
24962 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
24963
24964 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
24965@@ -801,10 +806,10 @@ void __init mem_init(void)
24966 ((unsigned long)&__init_end -
24967 (unsigned long)&__init_begin) >> 10,
24968
24969- (unsigned long)&_etext, (unsigned long)&_edata,
24970- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
24971+ (unsigned long)&_sdata, (unsigned long)&_edata,
24972+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
24973
24974- (unsigned long)&_text, (unsigned long)&_etext,
24975+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
24976 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
24977
24978 /*
24979@@ -882,6 +887,7 @@ void set_kernel_text_rw(void)
24980 if (!kernel_set_to_readonly)
24981 return;
24982
24983+ start = ktla_ktva(start);
24984 pr_debug("Set kernel text: %lx - %lx for read write\n",
24985 start, start+size);
24986
24987@@ -896,6 +902,7 @@ void set_kernel_text_ro(void)
24988 if (!kernel_set_to_readonly)
24989 return;
24990
24991+ start = ktla_ktva(start);
24992 pr_debug("Set kernel text: %lx - %lx for read only\n",
24993 start, start+size);
24994
24995@@ -924,6 +931,7 @@ void mark_rodata_ro(void)
24996 unsigned long start = PFN_ALIGN(_text);
24997 unsigned long size = PFN_ALIGN(_etext) - start;
24998
24999+ start = ktla_ktva(start);
25000 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25001 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25002 size >> 10);
25003diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25004index fc18be0..e539653 100644
25005--- a/arch/x86/mm/init_64.c
25006+++ b/arch/x86/mm/init_64.c
25007@@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
25008 * around without checking the pgd every time.
25009 */
25010
25011-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
25012+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
25013 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25014
25015 int force_personality32;
25016@@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25017
25018 for (address = start; address <= end; address += PGDIR_SIZE) {
25019 const pgd_t *pgd_ref = pgd_offset_k(address);
25020+
25021+#ifdef CONFIG_PAX_PER_CPU_PGD
25022+ unsigned long cpu;
25023+#else
25024 struct page *page;
25025+#endif
25026
25027 if (pgd_none(*pgd_ref))
25028 continue;
25029
25030 spin_lock(&pgd_lock);
25031+
25032+#ifdef CONFIG_PAX_PER_CPU_PGD
25033+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25034+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
25035+#else
25036 list_for_each_entry(page, &pgd_list, lru) {
25037 pgd_t *pgd;
25038 spinlock_t *pgt_lock;
25039@@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25040 /* the pgt_lock only for Xen */
25041 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
25042 spin_lock(pgt_lock);
25043+#endif
25044
25045 if (pgd_none(*pgd))
25046 set_pgd(pgd, *pgd_ref);
25047@@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25048 BUG_ON(pgd_page_vaddr(*pgd)
25049 != pgd_page_vaddr(*pgd_ref));
25050
25051+#ifndef CONFIG_PAX_PER_CPU_PGD
25052 spin_unlock(pgt_lock);
25053+#endif
25054+
25055 }
25056 spin_unlock(&pgd_lock);
25057 }
25058@@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
25059 {
25060 if (pgd_none(*pgd)) {
25061 pud_t *pud = (pud_t *)spp_getpage();
25062- pgd_populate(&init_mm, pgd, pud);
25063+ pgd_populate_kernel(&init_mm, pgd, pud);
25064 if (pud != pud_offset(pgd, 0))
25065 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
25066 pud, pud_offset(pgd, 0));
25067@@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
25068 {
25069 if (pud_none(*pud)) {
25070 pmd_t *pmd = (pmd_t *) spp_getpage();
25071- pud_populate(&init_mm, pud, pmd);
25072+ pud_populate_kernel(&init_mm, pud, pmd);
25073 if (pmd != pmd_offset(pud, 0))
25074 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
25075 pmd, pmd_offset(pud, 0));
25076@@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25077 pmd = fill_pmd(pud, vaddr);
25078 pte = fill_pte(pmd, vaddr);
25079
25080+ pax_open_kernel();
25081 set_pte(pte, new_pte);
25082+ pax_close_kernel();
25083
25084 /*
25085 * It's enough to flush this one mapping.
25086@@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25087 pgd = pgd_offset_k((unsigned long)__va(phys));
25088 if (pgd_none(*pgd)) {
25089 pud = (pud_t *) spp_getpage();
25090- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25091- _PAGE_USER));
25092+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25093 }
25094 pud = pud_offset(pgd, (unsigned long)__va(phys));
25095 if (pud_none(*pud)) {
25096 pmd = (pmd_t *) spp_getpage();
25097- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25098- _PAGE_USER));
25099+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25100 }
25101 pmd = pmd_offset(pud, phys);
25102 BUG_ON(!pmd_none(*pmd));
25103@@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
25104 if (pfn >= pgt_buf_top)
25105 panic("alloc_low_page: ran out of memory");
25106
25107- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25108+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25109 clear_page(adr);
25110 *phys = pfn * PAGE_SIZE;
25111 return adr;
25112@@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
25113
25114 phys = __pa(virt);
25115 left = phys & (PAGE_SIZE - 1);
25116- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25117+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25118 adr = (void *)(((unsigned long)adr) | left);
25119
25120 return adr;
25121@@ -545,7 +559,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
25122 unmap_low_page(pmd);
25123
25124 spin_lock(&init_mm.page_table_lock);
25125- pud_populate(&init_mm, pud, __va(pmd_phys));
25126+ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
25127 spin_unlock(&init_mm.page_table_lock);
25128 }
25129 __flush_tlb_all();
25130@@ -591,7 +605,7 @@ kernel_physical_mapping_init(unsigned long start,
25131 unmap_low_page(pud);
25132
25133 spin_lock(&init_mm.page_table_lock);
25134- pgd_populate(&init_mm, pgd, __va(pud_phys));
25135+ pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
25136 spin_unlock(&init_mm.page_table_lock);
25137 pgd_changed = true;
25138 }
25139@@ -683,6 +697,12 @@ void __init mem_init(void)
25140
25141 pci_iommu_alloc();
25142
25143+#ifdef CONFIG_PAX_PER_CPU_PGD
25144+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25145+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25146+ KERNEL_PGD_PTRS);
25147+#endif
25148+
25149 /* clear_bss() already clear the empty_zero_page */
25150
25151 reservedpages = 0;
25152@@ -843,8 +863,8 @@ int kern_addr_valid(unsigned long addr)
25153 static struct vm_area_struct gate_vma = {
25154 .vm_start = VSYSCALL_START,
25155 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25156- .vm_page_prot = PAGE_READONLY_EXEC,
25157- .vm_flags = VM_READ | VM_EXEC
25158+ .vm_page_prot = PAGE_READONLY,
25159+ .vm_flags = VM_READ
25160 };
25161
25162 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
25163@@ -878,7 +898,7 @@ int in_gate_area_no_mm(unsigned long addr)
25164
25165 const char *arch_vma_name(struct vm_area_struct *vma)
25166 {
25167- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25168+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25169 return "[vdso]";
25170 if (vma == &gate_vma)
25171 return "[vsyscall]";
25172diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25173index 7b179b4..6bd1777 100644
25174--- a/arch/x86/mm/iomap_32.c
25175+++ b/arch/x86/mm/iomap_32.c
25176@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
25177 type = kmap_atomic_idx_push();
25178 idx = type + KM_TYPE_NR * smp_processor_id();
25179 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25180+
25181+ pax_open_kernel();
25182 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25183+ pax_close_kernel();
25184+
25185 arch_flush_lazy_mmu_mode();
25186
25187 return (void *)vaddr;
25188diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25189index be1ef57..55f0160 100644
25190--- a/arch/x86/mm/ioremap.c
25191+++ b/arch/x86/mm/ioremap.c
25192@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25193 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
25194 int is_ram = page_is_ram(pfn);
25195
25196- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25197+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25198 return NULL;
25199 WARN_ON_ONCE(is_ram);
25200 }
25201@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25202
25203 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25204 if (page_is_ram(start >> PAGE_SHIFT))
25205+#ifdef CONFIG_HIGHMEM
25206+ if ((start >> PAGE_SHIFT) < max_low_pfn)
25207+#endif
25208 return __va(phys);
25209
25210 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
25211@@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
25212 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25213
25214 static __initdata int after_paging_init;
25215-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25216+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25217
25218 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25219 {
25220@@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
25221 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25222
25223 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25224- memset(bm_pte, 0, sizeof(bm_pte));
25225- pmd_populate_kernel(&init_mm, pmd, bm_pte);
25226+ pmd_populate_user(&init_mm, pmd, bm_pte);
25227
25228 /*
25229 * The boot-ioremap range spans multiple pmds, for which
25230diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25231index d87dd6d..bf3fa66 100644
25232--- a/arch/x86/mm/kmemcheck/kmemcheck.c
25233+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25234@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25235 * memory (e.g. tracked pages)? For now, we need this to avoid
25236 * invoking kmemcheck for PnP BIOS calls.
25237 */
25238- if (regs->flags & X86_VM_MASK)
25239+ if (v8086_mode(regs))
25240 return false;
25241- if (regs->cs != __KERNEL_CS)
25242+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25243 return false;
25244
25245 pte = kmemcheck_pte_lookup(address);
25246diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25247index 845df68..1d8d29f 100644
25248--- a/arch/x86/mm/mmap.c
25249+++ b/arch/x86/mm/mmap.c
25250@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
25251 * Leave an at least ~128 MB hole with possible stack randomization.
25252 */
25253 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25254-#define MAX_GAP (TASK_SIZE/6*5)
25255+#define MAX_GAP (pax_task_size/6*5)
25256
25257 static int mmap_is_legacy(void)
25258 {
25259@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
25260 return rnd << PAGE_SHIFT;
25261 }
25262
25263-static unsigned long mmap_base(void)
25264+static unsigned long mmap_base(struct mm_struct *mm)
25265 {
25266 unsigned long gap = rlimit(RLIMIT_STACK);
25267+ unsigned long pax_task_size = TASK_SIZE;
25268+
25269+#ifdef CONFIG_PAX_SEGMEXEC
25270+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25271+ pax_task_size = SEGMEXEC_TASK_SIZE;
25272+#endif
25273
25274 if (gap < MIN_GAP)
25275 gap = MIN_GAP;
25276 else if (gap > MAX_GAP)
25277 gap = MAX_GAP;
25278
25279- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25280+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25281 }
25282
25283 /*
25284 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25285 * does, but not when emulating X86_32
25286 */
25287-static unsigned long mmap_legacy_base(void)
25288+static unsigned long mmap_legacy_base(struct mm_struct *mm)
25289 {
25290- if (mmap_is_ia32())
25291+ if (mmap_is_ia32()) {
25292+
25293+#ifdef CONFIG_PAX_SEGMEXEC
25294+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25295+ return SEGMEXEC_TASK_UNMAPPED_BASE;
25296+ else
25297+#endif
25298+
25299 return TASK_UNMAPPED_BASE;
25300- else
25301+ } else
25302 return TASK_UNMAPPED_BASE + mmap_rnd();
25303 }
25304
25305@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
25306 void arch_pick_mmap_layout(struct mm_struct *mm)
25307 {
25308 if (mmap_is_legacy()) {
25309- mm->mmap_base = mmap_legacy_base();
25310+ mm->mmap_base = mmap_legacy_base(mm);
25311+
25312+#ifdef CONFIG_PAX_RANDMMAP
25313+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25314+ mm->mmap_base += mm->delta_mmap;
25315+#endif
25316+
25317 mm->get_unmapped_area = arch_get_unmapped_area;
25318 mm->unmap_area = arch_unmap_area;
25319 } else {
25320- mm->mmap_base = mmap_base();
25321+ mm->mmap_base = mmap_base(mm);
25322+
25323+#ifdef CONFIG_PAX_RANDMMAP
25324+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25325+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25326+#endif
25327+
25328 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25329 mm->unmap_area = arch_unmap_area_topdown;
25330 }
25331diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25332index dc0b727..dc9d71a 100644
25333--- a/arch/x86/mm/mmio-mod.c
25334+++ b/arch/x86/mm/mmio-mod.c
25335@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25336 break;
25337 default:
25338 {
25339- unsigned char *ip = (unsigned char *)instptr;
25340+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25341 my_trace->opcode = MMIO_UNKNOWN_OP;
25342 my_trace->width = 0;
25343 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25344@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25345 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25346 void __iomem *addr)
25347 {
25348- static atomic_t next_id;
25349+ static atomic_unchecked_t next_id;
25350 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25351 /* These are page-unaligned. */
25352 struct mmiotrace_map map = {
25353@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25354 .private = trace
25355 },
25356 .phys = offset,
25357- .id = atomic_inc_return(&next_id)
25358+ .id = atomic_inc_return_unchecked(&next_id)
25359 };
25360 map.map_id = trace->id;
25361
25362diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25363index b008656..773eac2 100644
25364--- a/arch/x86/mm/pageattr-test.c
25365+++ b/arch/x86/mm/pageattr-test.c
25366@@ -36,7 +36,7 @@ enum {
25367
25368 static int pte_testbit(pte_t pte)
25369 {
25370- return pte_flags(pte) & _PAGE_UNUSED1;
25371+ return pte_flags(pte) & _PAGE_CPA_TEST;
25372 }
25373
25374 struct split_state {
25375diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25376index e1ebde3..b1e1db38 100644
25377--- a/arch/x86/mm/pageattr.c
25378+++ b/arch/x86/mm/pageattr.c
25379@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25380 */
25381 #ifdef CONFIG_PCI_BIOS
25382 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25383- pgprot_val(forbidden) |= _PAGE_NX;
25384+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25385 #endif
25386
25387 /*
25388@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25389 * Does not cover __inittext since that is gone later on. On
25390 * 64bit we do not enforce !NX on the low mapping
25391 */
25392- if (within(address, (unsigned long)_text, (unsigned long)_etext))
25393- pgprot_val(forbidden) |= _PAGE_NX;
25394+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25395+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25396
25397+#ifdef CONFIG_DEBUG_RODATA
25398 /*
25399 * The .rodata section needs to be read-only. Using the pfn
25400 * catches all aliases.
25401@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25402 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25403 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25404 pgprot_val(forbidden) |= _PAGE_RW;
25405+#endif
25406
25407 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
25408 /*
25409@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25410 }
25411 #endif
25412
25413+#ifdef CONFIG_PAX_KERNEXEC
25414+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25415+ pgprot_val(forbidden) |= _PAGE_RW;
25416+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25417+ }
25418+#endif
25419+
25420 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25421
25422 return prot;
25423@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25424 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25425 {
25426 /* change init_mm */
25427+ pax_open_kernel();
25428 set_pte_atomic(kpte, pte);
25429+
25430 #ifdef CONFIG_X86_32
25431 if (!SHARED_KERNEL_PMD) {
25432+
25433+#ifdef CONFIG_PAX_PER_CPU_PGD
25434+ unsigned long cpu;
25435+#else
25436 struct page *page;
25437+#endif
25438
25439+#ifdef CONFIG_PAX_PER_CPU_PGD
25440+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25441+ pgd_t *pgd = get_cpu_pgd(cpu);
25442+#else
25443 list_for_each_entry(page, &pgd_list, lru) {
25444- pgd_t *pgd;
25445+ pgd_t *pgd = (pgd_t *)page_address(page);
25446+#endif
25447+
25448 pud_t *pud;
25449 pmd_t *pmd;
25450
25451- pgd = (pgd_t *)page_address(page) + pgd_index(address);
25452+ pgd += pgd_index(address);
25453 pud = pud_offset(pgd, address);
25454 pmd = pmd_offset(pud, address);
25455 set_pte_atomic((pte_t *)pmd, pte);
25456 }
25457 }
25458 #endif
25459+ pax_close_kernel();
25460 }
25461
25462 static int
25463diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25464index f6ff57b..481690f 100644
25465--- a/arch/x86/mm/pat.c
25466+++ b/arch/x86/mm/pat.c
25467@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
25468
25469 if (!entry) {
25470 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25471- current->comm, current->pid, start, end);
25472+ current->comm, task_pid_nr(current), start, end);
25473 return -EINVAL;
25474 }
25475
25476@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25477 while (cursor < to) {
25478 if (!devmem_is_allowed(pfn)) {
25479 printk(KERN_INFO
25480- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25481- current->comm, from, to);
25482+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25483+ current->comm, from, to, cursor);
25484 return 0;
25485 }
25486 cursor += PAGE_SIZE;
25487@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25488 printk(KERN_INFO
25489 "%s:%d ioremap_change_attr failed %s "
25490 "for %Lx-%Lx\n",
25491- current->comm, current->pid,
25492+ current->comm, task_pid_nr(current),
25493 cattr_name(flags),
25494 base, (unsigned long long)(base + size));
25495 return -EINVAL;
25496@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25497 if (want_flags != flags) {
25498 printk(KERN_WARNING
25499 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
25500- current->comm, current->pid,
25501+ current->comm, task_pid_nr(current),
25502 cattr_name(want_flags),
25503 (unsigned long long)paddr,
25504 (unsigned long long)(paddr + size),
25505@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25506 free_memtype(paddr, paddr + size);
25507 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25508 " for %Lx-%Lx, got %s\n",
25509- current->comm, current->pid,
25510+ current->comm, task_pid_nr(current),
25511 cattr_name(want_flags),
25512 (unsigned long long)paddr,
25513 (unsigned long long)(paddr + size),
25514diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25515index 9f0614d..92ae64a 100644
25516--- a/arch/x86/mm/pf_in.c
25517+++ b/arch/x86/mm/pf_in.c
25518@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25519 int i;
25520 enum reason_type rv = OTHERS;
25521
25522- p = (unsigned char *)ins_addr;
25523+ p = (unsigned char *)ktla_ktva(ins_addr);
25524 p += skip_prefix(p, &prf);
25525 p += get_opcode(p, &opcode);
25526
25527@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25528 struct prefix_bits prf;
25529 int i;
25530
25531- p = (unsigned char *)ins_addr;
25532+ p = (unsigned char *)ktla_ktva(ins_addr);
25533 p += skip_prefix(p, &prf);
25534 p += get_opcode(p, &opcode);
25535
25536@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25537 struct prefix_bits prf;
25538 int i;
25539
25540- p = (unsigned char *)ins_addr;
25541+ p = (unsigned char *)ktla_ktva(ins_addr);
25542 p += skip_prefix(p, &prf);
25543 p += get_opcode(p, &opcode);
25544
25545@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25546 struct prefix_bits prf;
25547 int i;
25548
25549- p = (unsigned char *)ins_addr;
25550+ p = (unsigned char *)ktla_ktva(ins_addr);
25551 p += skip_prefix(p, &prf);
25552 p += get_opcode(p, &opcode);
25553 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25554@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25555 struct prefix_bits prf;
25556 int i;
25557
25558- p = (unsigned char *)ins_addr;
25559+ p = (unsigned char *)ktla_ktva(ins_addr);
25560 p += skip_prefix(p, &prf);
25561 p += get_opcode(p, &opcode);
25562 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25563diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25564index 8573b83..4f3ed7e 100644
25565--- a/arch/x86/mm/pgtable.c
25566+++ b/arch/x86/mm/pgtable.c
25567@@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
25568 list_del(&page->lru);
25569 }
25570
25571-#define UNSHARED_PTRS_PER_PGD \
25572- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25573+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25574+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25575
25576+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
25577+{
25578+ unsigned int count = USER_PGD_PTRS;
25579
25580+ while (count--)
25581+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25582+}
25583+#endif
25584+
25585+#ifdef CONFIG_PAX_PER_CPU_PGD
25586+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
25587+{
25588+ unsigned int count = USER_PGD_PTRS;
25589+
25590+ while (count--) {
25591+ pgd_t pgd;
25592+
25593+#ifdef CONFIG_X86_64
25594+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
25595+#else
25596+ pgd = *src++;
25597+#endif
25598+
25599+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25600+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
25601+#endif
25602+
25603+ *dst++ = pgd;
25604+ }
25605+
25606+}
25607+#endif
25608+
25609+#ifdef CONFIG_X86_64
25610+#define pxd_t pud_t
25611+#define pyd_t pgd_t
25612+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25613+#define pxd_free(mm, pud) pud_free((mm), (pud))
25614+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25615+#define pyd_offset(mm, address) pgd_offset((mm), (address))
25616+#define PYD_SIZE PGDIR_SIZE
25617+#else
25618+#define pxd_t pmd_t
25619+#define pyd_t pud_t
25620+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25621+#define pxd_free(mm, pud) pmd_free((mm), (pud))
25622+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25623+#define pyd_offset(mm, address) pud_offset((mm), (address))
25624+#define PYD_SIZE PUD_SIZE
25625+#endif
25626+
25627+#ifdef CONFIG_PAX_PER_CPU_PGD
25628+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
25629+static inline void pgd_dtor(pgd_t *pgd) {}
25630+#else
25631 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
25632 {
25633 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
25634@@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
25635 pgd_list_del(pgd);
25636 spin_unlock(&pgd_lock);
25637 }
25638+#endif
25639
25640 /*
25641 * List of all pgd's needed for non-PAE so it can invalidate entries
25642@@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
25643 * -- wli
25644 */
25645
25646-#ifdef CONFIG_X86_PAE
25647+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25648 /*
25649 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25650 * updating the top-level pagetable entries to guarantee the
25651@@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
25652 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25653 * and initialize the kernel pmds here.
25654 */
25655-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25656+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25657
25658 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25659 {
25660@@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25661 */
25662 flush_tlb_mm(mm);
25663 }
25664+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25665+#define PREALLOCATED_PXDS USER_PGD_PTRS
25666 #else /* !CONFIG_X86_PAE */
25667
25668 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25669-#define PREALLOCATED_PMDS 0
25670+#define PREALLOCATED_PXDS 0
25671
25672 #endif /* CONFIG_X86_PAE */
25673
25674-static void free_pmds(pmd_t *pmds[])
25675+static void free_pxds(pxd_t *pxds[])
25676 {
25677 int i;
25678
25679- for(i = 0; i < PREALLOCATED_PMDS; i++)
25680- if (pmds[i])
25681- free_page((unsigned long)pmds[i]);
25682+ for(i = 0; i < PREALLOCATED_PXDS; i++)
25683+ if (pxds[i])
25684+ free_page((unsigned long)pxds[i]);
25685 }
25686
25687-static int preallocate_pmds(pmd_t *pmds[])
25688+static int preallocate_pxds(pxd_t *pxds[])
25689 {
25690 int i;
25691 bool failed = false;
25692
25693- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25694- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25695- if (pmd == NULL)
25696+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25697+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25698+ if (pxd == NULL)
25699 failed = true;
25700- pmds[i] = pmd;
25701+ pxds[i] = pxd;
25702 }
25703
25704 if (failed) {
25705- free_pmds(pmds);
25706+ free_pxds(pxds);
25707 return -ENOMEM;
25708 }
25709
25710@@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
25711 * preallocate which never got a corresponding vma will need to be
25712 * freed manually.
25713 */
25714-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25715+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25716 {
25717 int i;
25718
25719- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25720+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25721 pgd_t pgd = pgdp[i];
25722
25723 if (pgd_val(pgd) != 0) {
25724- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25725+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25726
25727- pgdp[i] = native_make_pgd(0);
25728+ set_pgd(pgdp + i, native_make_pgd(0));
25729
25730- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25731- pmd_free(mm, pmd);
25732+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25733+ pxd_free(mm, pxd);
25734 }
25735 }
25736 }
25737
25738-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25739+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25740 {
25741- pud_t *pud;
25742+ pyd_t *pyd;
25743 unsigned long addr;
25744 int i;
25745
25746- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25747+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25748 return;
25749
25750- pud = pud_offset(pgd, 0);
25751+#ifdef CONFIG_X86_64
25752+ pyd = pyd_offset(mm, 0L);
25753+#else
25754+ pyd = pyd_offset(pgd, 0L);
25755+#endif
25756
25757- for (addr = i = 0; i < PREALLOCATED_PMDS;
25758- i++, pud++, addr += PUD_SIZE) {
25759- pmd_t *pmd = pmds[i];
25760+ for (addr = i = 0; i < PREALLOCATED_PXDS;
25761+ i++, pyd++, addr += PYD_SIZE) {
25762+ pxd_t *pxd = pxds[i];
25763
25764 if (i >= KERNEL_PGD_BOUNDARY)
25765- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25766- sizeof(pmd_t) * PTRS_PER_PMD);
25767+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25768+ sizeof(pxd_t) * PTRS_PER_PMD);
25769
25770- pud_populate(mm, pud, pmd);
25771+ pyd_populate(mm, pyd, pxd);
25772 }
25773 }
25774
25775 pgd_t *pgd_alloc(struct mm_struct *mm)
25776 {
25777 pgd_t *pgd;
25778- pmd_t *pmds[PREALLOCATED_PMDS];
25779+ pxd_t *pxds[PREALLOCATED_PXDS];
25780
25781 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25782
25783@@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25784
25785 mm->pgd = pgd;
25786
25787- if (preallocate_pmds(pmds) != 0)
25788+ if (preallocate_pxds(pxds) != 0)
25789 goto out_free_pgd;
25790
25791 if (paravirt_pgd_alloc(mm) != 0)
25792- goto out_free_pmds;
25793+ goto out_free_pxds;
25794
25795 /*
25796 * Make sure that pre-populating the pmds is atomic with
25797@@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25798 spin_lock(&pgd_lock);
25799
25800 pgd_ctor(mm, pgd);
25801- pgd_prepopulate_pmd(mm, pgd, pmds);
25802+ pgd_prepopulate_pxd(mm, pgd, pxds);
25803
25804 spin_unlock(&pgd_lock);
25805
25806 return pgd;
25807
25808-out_free_pmds:
25809- free_pmds(pmds);
25810+out_free_pxds:
25811+ free_pxds(pxds);
25812 out_free_pgd:
25813 free_page((unsigned long)pgd);
25814 out:
25815@@ -295,7 +356,7 @@ out:
25816
25817 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25818 {
25819- pgd_mop_up_pmds(mm, pgd);
25820+ pgd_mop_up_pxds(mm, pgd);
25821 pgd_dtor(pgd);
25822 paravirt_pgd_free(mm, pgd);
25823 free_page((unsigned long)pgd);
25824diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25825index a69bcb8..19068ab 100644
25826--- a/arch/x86/mm/pgtable_32.c
25827+++ b/arch/x86/mm/pgtable_32.c
25828@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25829 return;
25830 }
25831 pte = pte_offset_kernel(pmd, vaddr);
25832+
25833+ pax_open_kernel();
25834 if (pte_val(pteval))
25835 set_pte_at(&init_mm, vaddr, pte, pteval);
25836 else
25837 pte_clear(&init_mm, vaddr, pte);
25838+ pax_close_kernel();
25839
25840 /*
25841 * It's enough to flush this one mapping.
25842diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25843index 410531d..0f16030 100644
25844--- a/arch/x86/mm/setup_nx.c
25845+++ b/arch/x86/mm/setup_nx.c
25846@@ -5,8 +5,10 @@
25847 #include <asm/pgtable.h>
25848 #include <asm/proto.h>
25849
25850+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25851 static int disable_nx __cpuinitdata;
25852
25853+#ifndef CONFIG_PAX_PAGEEXEC
25854 /*
25855 * noexec = on|off
25856 *
25857@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
25858 return 0;
25859 }
25860 early_param("noexec", noexec_setup);
25861+#endif
25862+
25863+#endif
25864
25865 void __cpuinit x86_configure_nx(void)
25866 {
25867+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25868 if (cpu_has_nx && !disable_nx)
25869 __supported_pte_mask |= _PAGE_NX;
25870 else
25871+#endif
25872 __supported_pte_mask &= ~_PAGE_NX;
25873 }
25874
25875diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25876index d6c0418..06a0ad5 100644
25877--- a/arch/x86/mm/tlb.c
25878+++ b/arch/x86/mm/tlb.c
25879@@ -65,7 +65,11 @@ void leave_mm(int cpu)
25880 BUG();
25881 cpumask_clear_cpu(cpu,
25882 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
25883+
25884+#ifndef CONFIG_PAX_PER_CPU_PGD
25885 load_cr3(swapper_pg_dir);
25886+#endif
25887+
25888 }
25889 EXPORT_SYMBOL_GPL(leave_mm);
25890
25891diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
25892index 877b9a1..a8ecf42 100644
25893--- a/arch/x86/net/bpf_jit.S
25894+++ b/arch/x86/net/bpf_jit.S
25895@@ -9,6 +9,7 @@
25896 */
25897 #include <linux/linkage.h>
25898 #include <asm/dwarf2.h>
25899+#include <asm/alternative-asm.h>
25900
25901 /*
25902 * Calling convention :
25903@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
25904 jle bpf_slow_path_word
25905 mov (SKBDATA,%rsi),%eax
25906 bswap %eax /* ntohl() */
25907+ pax_force_retaddr
25908 ret
25909
25910 sk_load_half:
25911@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
25912 jle bpf_slow_path_half
25913 movzwl (SKBDATA,%rsi),%eax
25914 rol $8,%ax # ntohs()
25915+ pax_force_retaddr
25916 ret
25917
25918 sk_load_byte:
25919@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
25920 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
25921 jle bpf_slow_path_byte
25922 movzbl (SKBDATA,%rsi),%eax
25923+ pax_force_retaddr
25924 ret
25925
25926 /**
25927@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
25928 movzbl (SKBDATA,%rsi),%ebx
25929 and $15,%bl
25930 shl $2,%bl
25931+ pax_force_retaddr
25932 ret
25933
25934 /* rsi contains offset and can be scratched */
25935@@ -109,6 +114,7 @@ bpf_slow_path_word:
25936 js bpf_error
25937 mov -12(%rbp),%eax
25938 bswap %eax
25939+ pax_force_retaddr
25940 ret
25941
25942 bpf_slow_path_half:
25943@@ -117,12 +123,14 @@ bpf_slow_path_half:
25944 mov -12(%rbp),%ax
25945 rol $8,%ax
25946 movzwl %ax,%eax
25947+ pax_force_retaddr
25948 ret
25949
25950 bpf_slow_path_byte:
25951 bpf_slow_path_common(1)
25952 js bpf_error
25953 movzbl -12(%rbp),%eax
25954+ pax_force_retaddr
25955 ret
25956
25957 bpf_slow_path_byte_msh:
25958@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
25959 and $15,%al
25960 shl $2,%al
25961 xchg %eax,%ebx
25962+ pax_force_retaddr
25963 ret
25964
25965 #define sk_negative_common(SIZE) \
25966@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
25967 sk_negative_common(4)
25968 mov (%rax), %eax
25969 bswap %eax
25970+ pax_force_retaddr
25971 ret
25972
25973 bpf_slow_path_half_neg:
25974@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
25975 mov (%rax),%ax
25976 rol $8,%ax
25977 movzwl %ax,%eax
25978+ pax_force_retaddr
25979 ret
25980
25981 bpf_slow_path_byte_neg:
25982@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
25983 .globl sk_load_byte_negative_offset
25984 sk_negative_common(1)
25985 movzbl (%rax), %eax
25986+ pax_force_retaddr
25987 ret
25988
25989 bpf_slow_path_byte_msh_neg:
25990@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
25991 and $15,%al
25992 shl $2,%al
25993 xchg %eax,%ebx
25994+ pax_force_retaddr
25995 ret
25996
25997 bpf_error:
25998@@ -197,4 +210,5 @@ bpf_error:
25999 xor %eax,%eax
26000 mov -8(%rbp),%rbx
26001 leaveq
26002+ pax_force_retaddr
26003 ret
26004diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
26005index 0597f95..a12c36e 100644
26006--- a/arch/x86/net/bpf_jit_comp.c
26007+++ b/arch/x86/net/bpf_jit_comp.c
26008@@ -120,6 +120,11 @@ static inline void bpf_flush_icache(void *start, void *end)
26009 set_fs(old_fs);
26010 }
26011
26012+struct bpf_jit_work {
26013+ struct work_struct work;
26014+ void *image;
26015+};
26016+
26017 #define CHOOSE_LOAD_FUNC(K, func) \
26018 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
26019
26020@@ -146,6 +151,10 @@ void bpf_jit_compile(struct sk_filter *fp)
26021 if (addrs == NULL)
26022 return;
26023
26024+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
26025+ if (!fp->work)
26026+ goto out;
26027+
26028 /* Before first pass, make a rough estimation of addrs[]
26029 * each bpf instruction is translated to less than 64 bytes
26030 */
26031@@ -589,17 +598,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26032 break;
26033 default:
26034 /* hmm, too complex filter, give up with jit compiler */
26035- goto out;
26036+ goto error;
26037 }
26038 ilen = prog - temp;
26039 if (image) {
26040 if (unlikely(proglen + ilen > oldproglen)) {
26041 pr_err("bpb_jit_compile fatal error\n");
26042- kfree(addrs);
26043- module_free(NULL, image);
26044- return;
26045+ module_free_exec(NULL, image);
26046+ goto error;
26047 }
26048+ pax_open_kernel();
26049 memcpy(image + proglen, temp, ilen);
26050+ pax_close_kernel();
26051 }
26052 proglen += ilen;
26053 addrs[i] = proglen;
26054@@ -620,11 +630,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26055 break;
26056 }
26057 if (proglen == oldproglen) {
26058- image = module_alloc(max_t(unsigned int,
26059- proglen,
26060- sizeof(struct work_struct)));
26061+ image = module_alloc_exec(proglen);
26062 if (!image)
26063- goto out;
26064+ goto error;
26065 }
26066 oldproglen = proglen;
26067 }
26068@@ -640,7 +648,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26069 bpf_flush_icache(image, image + proglen);
26070
26071 fp->bpf_func = (void *)image;
26072- }
26073+ } else
26074+error:
26075+ kfree(fp->work);
26076+
26077 out:
26078 kfree(addrs);
26079 return;
26080@@ -648,18 +659,20 @@ out:
26081
26082 static void jit_free_defer(struct work_struct *arg)
26083 {
26084- module_free(NULL, arg);
26085+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
26086+ kfree(arg);
26087 }
26088
26089 /* run from softirq, we must use a work_struct to call
26090- * module_free() from process context
26091+ * module_free_exec() from process context
26092 */
26093 void bpf_jit_free(struct sk_filter *fp)
26094 {
26095 if (fp->bpf_func != sk_run_filter) {
26096- struct work_struct *work = (struct work_struct *)fp->bpf_func;
26097+ struct work_struct *work = &fp->work->work;
26098
26099 INIT_WORK(work, jit_free_defer);
26100+ fp->work->image = fp->bpf_func;
26101 schedule_work(work);
26102 }
26103 }
26104diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26105index d6aa6e8..266395a 100644
26106--- a/arch/x86/oprofile/backtrace.c
26107+++ b/arch/x86/oprofile/backtrace.c
26108@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
26109 struct stack_frame_ia32 *fp;
26110 unsigned long bytes;
26111
26112- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26113+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26114 if (bytes != sizeof(bufhead))
26115 return NULL;
26116
26117- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
26118+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
26119
26120 oprofile_add_trace(bufhead[0].return_address);
26121
26122@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
26123 struct stack_frame bufhead[2];
26124 unsigned long bytes;
26125
26126- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26127+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26128 if (bytes != sizeof(bufhead))
26129 return NULL;
26130
26131@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26132 {
26133 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
26134
26135- if (!user_mode_vm(regs)) {
26136+ if (!user_mode(regs)) {
26137 unsigned long stack = kernel_stack_pointer(regs);
26138 if (depth)
26139 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26140diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
26141index 140942f..8a5cc55 100644
26142--- a/arch/x86/pci/mrst.c
26143+++ b/arch/x86/pci/mrst.c
26144@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
26145 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
26146 pci_mmcfg_late_init();
26147 pcibios_enable_irq = mrst_pci_irq_enable;
26148- pci_root_ops = pci_mrst_ops;
26149+ pax_open_kernel();
26150+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
26151+ pax_close_kernel();
26152 pci_soc_mode = 1;
26153 /* Continue with standard init */
26154 return 1;
26155diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26156index da8fe05..7ee6704 100644
26157--- a/arch/x86/pci/pcbios.c
26158+++ b/arch/x86/pci/pcbios.c
26159@@ -79,50 +79,93 @@ union bios32 {
26160 static struct {
26161 unsigned long address;
26162 unsigned short segment;
26163-} bios32_indirect = { 0, __KERNEL_CS };
26164+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26165
26166 /*
26167 * Returns the entry point for the given service, NULL on error
26168 */
26169
26170-static unsigned long bios32_service(unsigned long service)
26171+static unsigned long __devinit bios32_service(unsigned long service)
26172 {
26173 unsigned char return_code; /* %al */
26174 unsigned long address; /* %ebx */
26175 unsigned long length; /* %ecx */
26176 unsigned long entry; /* %edx */
26177 unsigned long flags;
26178+ struct desc_struct d, *gdt;
26179
26180 local_irq_save(flags);
26181- __asm__("lcall *(%%edi); cld"
26182+
26183+ gdt = get_cpu_gdt_table(smp_processor_id());
26184+
26185+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26186+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26187+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26188+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26189+
26190+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26191 : "=a" (return_code),
26192 "=b" (address),
26193 "=c" (length),
26194 "=d" (entry)
26195 : "0" (service),
26196 "1" (0),
26197- "D" (&bios32_indirect));
26198+ "D" (&bios32_indirect),
26199+ "r"(__PCIBIOS_DS)
26200+ : "memory");
26201+
26202+ pax_open_kernel();
26203+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26204+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26205+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26206+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26207+ pax_close_kernel();
26208+
26209 local_irq_restore(flags);
26210
26211 switch (return_code) {
26212- case 0:
26213- return address + entry;
26214- case 0x80: /* Not present */
26215- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26216- return 0;
26217- default: /* Shouldn't happen */
26218- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26219- service, return_code);
26220+ case 0: {
26221+ int cpu;
26222+ unsigned char flags;
26223+
26224+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26225+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26226+ printk(KERN_WARNING "bios32_service: not valid\n");
26227 return 0;
26228+ }
26229+ address = address + PAGE_OFFSET;
26230+ length += 16UL; /* some BIOSs underreport this... */
26231+ flags = 4;
26232+ if (length >= 64*1024*1024) {
26233+ length >>= PAGE_SHIFT;
26234+ flags |= 8;
26235+ }
26236+
26237+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26238+ gdt = get_cpu_gdt_table(cpu);
26239+ pack_descriptor(&d, address, length, 0x9b, flags);
26240+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26241+ pack_descriptor(&d, address, length, 0x93, flags);
26242+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26243+ }
26244+ return entry;
26245+ }
26246+ case 0x80: /* Not present */
26247+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26248+ return 0;
26249+ default: /* Shouldn't happen */
26250+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26251+ service, return_code);
26252+ return 0;
26253 }
26254 }
26255
26256 static struct {
26257 unsigned long address;
26258 unsigned short segment;
26259-} pci_indirect = { 0, __KERNEL_CS };
26260+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26261
26262-static int pci_bios_present;
26263+static int pci_bios_present __read_only;
26264
26265 static int __devinit check_pcibios(void)
26266 {
26267@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
26268 unsigned long flags, pcibios_entry;
26269
26270 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26271- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26272+ pci_indirect.address = pcibios_entry;
26273
26274 local_irq_save(flags);
26275- __asm__(
26276- "lcall *(%%edi); cld\n\t"
26277+ __asm__("movw %w6, %%ds\n\t"
26278+ "lcall *%%ss:(%%edi); cld\n\t"
26279+ "push %%ss\n\t"
26280+ "pop %%ds\n\t"
26281 "jc 1f\n\t"
26282 "xor %%ah, %%ah\n"
26283 "1:"
26284@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
26285 "=b" (ebx),
26286 "=c" (ecx)
26287 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26288- "D" (&pci_indirect)
26289+ "D" (&pci_indirect),
26290+ "r" (__PCIBIOS_DS)
26291 : "memory");
26292 local_irq_restore(flags);
26293
26294@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26295
26296 switch (len) {
26297 case 1:
26298- __asm__("lcall *(%%esi); cld\n\t"
26299+ __asm__("movw %w6, %%ds\n\t"
26300+ "lcall *%%ss:(%%esi); cld\n\t"
26301+ "push %%ss\n\t"
26302+ "pop %%ds\n\t"
26303 "jc 1f\n\t"
26304 "xor %%ah, %%ah\n"
26305 "1:"
26306@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26307 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26308 "b" (bx),
26309 "D" ((long)reg),
26310- "S" (&pci_indirect));
26311+ "S" (&pci_indirect),
26312+ "r" (__PCIBIOS_DS));
26313 /*
26314 * Zero-extend the result beyond 8 bits, do not trust the
26315 * BIOS having done it:
26316@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26317 *value &= 0xff;
26318 break;
26319 case 2:
26320- __asm__("lcall *(%%esi); cld\n\t"
26321+ __asm__("movw %w6, %%ds\n\t"
26322+ "lcall *%%ss:(%%esi); cld\n\t"
26323+ "push %%ss\n\t"
26324+ "pop %%ds\n\t"
26325 "jc 1f\n\t"
26326 "xor %%ah, %%ah\n"
26327 "1:"
26328@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26329 : "1" (PCIBIOS_READ_CONFIG_WORD),
26330 "b" (bx),
26331 "D" ((long)reg),
26332- "S" (&pci_indirect));
26333+ "S" (&pci_indirect),
26334+ "r" (__PCIBIOS_DS));
26335 /*
26336 * Zero-extend the result beyond 16 bits, do not trust the
26337 * BIOS having done it:
26338@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26339 *value &= 0xffff;
26340 break;
26341 case 4:
26342- __asm__("lcall *(%%esi); cld\n\t"
26343+ __asm__("movw %w6, %%ds\n\t"
26344+ "lcall *%%ss:(%%esi); cld\n\t"
26345+ "push %%ss\n\t"
26346+ "pop %%ds\n\t"
26347 "jc 1f\n\t"
26348 "xor %%ah, %%ah\n"
26349 "1:"
26350@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26351 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26352 "b" (bx),
26353 "D" ((long)reg),
26354- "S" (&pci_indirect));
26355+ "S" (&pci_indirect),
26356+ "r" (__PCIBIOS_DS));
26357 break;
26358 }
26359
26360@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26361
26362 switch (len) {
26363 case 1:
26364- __asm__("lcall *(%%esi); cld\n\t"
26365+ __asm__("movw %w6, %%ds\n\t"
26366+ "lcall *%%ss:(%%esi); cld\n\t"
26367+ "push %%ss\n\t"
26368+ "pop %%ds\n\t"
26369 "jc 1f\n\t"
26370 "xor %%ah, %%ah\n"
26371 "1:"
26372@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26373 "c" (value),
26374 "b" (bx),
26375 "D" ((long)reg),
26376- "S" (&pci_indirect));
26377+ "S" (&pci_indirect),
26378+ "r" (__PCIBIOS_DS));
26379 break;
26380 case 2:
26381- __asm__("lcall *(%%esi); cld\n\t"
26382+ __asm__("movw %w6, %%ds\n\t"
26383+ "lcall *%%ss:(%%esi); cld\n\t"
26384+ "push %%ss\n\t"
26385+ "pop %%ds\n\t"
26386 "jc 1f\n\t"
26387 "xor %%ah, %%ah\n"
26388 "1:"
26389@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26390 "c" (value),
26391 "b" (bx),
26392 "D" ((long)reg),
26393- "S" (&pci_indirect));
26394+ "S" (&pci_indirect),
26395+ "r" (__PCIBIOS_DS));
26396 break;
26397 case 4:
26398- __asm__("lcall *(%%esi); cld\n\t"
26399+ __asm__("movw %w6, %%ds\n\t"
26400+ "lcall *%%ss:(%%esi); cld\n\t"
26401+ "push %%ss\n\t"
26402+ "pop %%ds\n\t"
26403 "jc 1f\n\t"
26404 "xor %%ah, %%ah\n"
26405 "1:"
26406@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26407 "c" (value),
26408 "b" (bx),
26409 "D" ((long)reg),
26410- "S" (&pci_indirect));
26411+ "S" (&pci_indirect),
26412+ "r" (__PCIBIOS_DS));
26413 break;
26414 }
26415
26416@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26417
26418 DBG("PCI: Fetching IRQ routing table... ");
26419 __asm__("push %%es\n\t"
26420+ "movw %w8, %%ds\n\t"
26421 "push %%ds\n\t"
26422 "pop %%es\n\t"
26423- "lcall *(%%esi); cld\n\t"
26424+ "lcall *%%ss:(%%esi); cld\n\t"
26425 "pop %%es\n\t"
26426+ "push %%ss\n\t"
26427+ "pop %%ds\n"
26428 "jc 1f\n\t"
26429 "xor %%ah, %%ah\n"
26430 "1:"
26431@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26432 "1" (0),
26433 "D" ((long) &opt),
26434 "S" (&pci_indirect),
26435- "m" (opt)
26436+ "m" (opt),
26437+ "r" (__PCIBIOS_DS)
26438 : "memory");
26439 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26440 if (ret & 0xff00)
26441@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26442 {
26443 int ret;
26444
26445- __asm__("lcall *(%%esi); cld\n\t"
26446+ __asm__("movw %w5, %%ds\n\t"
26447+ "lcall *%%ss:(%%esi); cld\n\t"
26448+ "push %%ss\n\t"
26449+ "pop %%ds\n"
26450 "jc 1f\n\t"
26451 "xor %%ah, %%ah\n"
26452 "1:"
26453@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26454 : "0" (PCIBIOS_SET_PCI_HW_INT),
26455 "b" ((dev->bus->number << 8) | dev->devfn),
26456 "c" ((irq << 8) | (pin + 10)),
26457- "S" (&pci_indirect));
26458+ "S" (&pci_indirect),
26459+ "r" (__PCIBIOS_DS));
26460 return !(ret & 0xff00);
26461 }
26462 EXPORT_SYMBOL(pcibios_set_irq_routing);
26463diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
26464index 40e4469..1ab536e 100644
26465--- a/arch/x86/platform/efi/efi_32.c
26466+++ b/arch/x86/platform/efi/efi_32.c
26467@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
26468 {
26469 struct desc_ptr gdt_descr;
26470
26471+#ifdef CONFIG_PAX_KERNEXEC
26472+ struct desc_struct d;
26473+#endif
26474+
26475 local_irq_save(efi_rt_eflags);
26476
26477 load_cr3(initial_page_table);
26478 __flush_tlb_all();
26479
26480+#ifdef CONFIG_PAX_KERNEXEC
26481+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
26482+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26483+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
26484+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26485+#endif
26486+
26487 gdt_descr.address = __pa(get_cpu_gdt_table(0));
26488 gdt_descr.size = GDT_SIZE - 1;
26489 load_gdt(&gdt_descr);
26490@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
26491 {
26492 struct desc_ptr gdt_descr;
26493
26494+#ifdef CONFIG_PAX_KERNEXEC
26495+ struct desc_struct d;
26496+
26497+ memset(&d, 0, sizeof d);
26498+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26499+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26500+#endif
26501+
26502 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
26503 gdt_descr.size = GDT_SIZE - 1;
26504 load_gdt(&gdt_descr);
26505diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
26506index fbe66e6..c5c0dd2 100644
26507--- a/arch/x86/platform/efi/efi_stub_32.S
26508+++ b/arch/x86/platform/efi/efi_stub_32.S
26509@@ -6,7 +6,9 @@
26510 */
26511
26512 #include <linux/linkage.h>
26513+#include <linux/init.h>
26514 #include <asm/page_types.h>
26515+#include <asm/segment.h>
26516
26517 /*
26518 * efi_call_phys(void *, ...) is a function with variable parameters.
26519@@ -20,7 +22,7 @@
26520 * service functions will comply with gcc calling convention, too.
26521 */
26522
26523-.text
26524+__INIT
26525 ENTRY(efi_call_phys)
26526 /*
26527 * 0. The function can only be called in Linux kernel. So CS has been
26528@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
26529 * The mapping of lower virtual memory has been created in prelog and
26530 * epilog.
26531 */
26532- movl $1f, %edx
26533- subl $__PAGE_OFFSET, %edx
26534- jmp *%edx
26535+ movl $(__KERNEXEC_EFI_DS), %edx
26536+ mov %edx, %ds
26537+ mov %edx, %es
26538+ mov %edx, %ss
26539+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
26540 1:
26541
26542 /*
26543@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
26544 * parameter 2, ..., param n. To make things easy, we save the return
26545 * address of efi_call_phys in a global variable.
26546 */
26547- popl %edx
26548- movl %edx, saved_return_addr
26549- /* get the function pointer into ECX*/
26550- popl %ecx
26551- movl %ecx, efi_rt_function_ptr
26552- movl $2f, %edx
26553- subl $__PAGE_OFFSET, %edx
26554- pushl %edx
26555+ popl (saved_return_addr)
26556+ popl (efi_rt_function_ptr)
26557
26558 /*
26559 * 3. Clear PG bit in %CR0.
26560@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
26561 /*
26562 * 5. Call the physical function.
26563 */
26564- jmp *%ecx
26565+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
26566
26567-2:
26568 /*
26569 * 6. After EFI runtime service returns, control will return to
26570 * following instruction. We'd better readjust stack pointer first.
26571@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
26572 movl %cr0, %edx
26573 orl $0x80000000, %edx
26574 movl %edx, %cr0
26575- jmp 1f
26576-1:
26577+
26578 /*
26579 * 8. Now restore the virtual mode from flat mode by
26580 * adding EIP with PAGE_OFFSET.
26581 */
26582- movl $1f, %edx
26583- jmp *%edx
26584+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
26585 1:
26586+ movl $(__KERNEL_DS), %edx
26587+ mov %edx, %ds
26588+ mov %edx, %es
26589+ mov %edx, %ss
26590
26591 /*
26592 * 9. Balance the stack. And because EAX contain the return value,
26593 * we'd better not clobber it.
26594 */
26595- leal efi_rt_function_ptr, %edx
26596- movl (%edx), %ecx
26597- pushl %ecx
26598+ pushl (efi_rt_function_ptr)
26599
26600 /*
26601- * 10. Push the saved return address onto the stack and return.
26602+ * 10. Return to the saved return address.
26603 */
26604- leal saved_return_addr, %edx
26605- movl (%edx), %ecx
26606- pushl %ecx
26607- ret
26608+ jmpl *(saved_return_addr)
26609 ENDPROC(efi_call_phys)
26610 .previous
26611
26612-.data
26613+__INITDATA
26614 saved_return_addr:
26615 .long 0
26616 efi_rt_function_ptr:
26617diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
26618index 4c07cca..2c8427d 100644
26619--- a/arch/x86/platform/efi/efi_stub_64.S
26620+++ b/arch/x86/platform/efi/efi_stub_64.S
26621@@ -7,6 +7,7 @@
26622 */
26623
26624 #include <linux/linkage.h>
26625+#include <asm/alternative-asm.h>
26626
26627 #define SAVE_XMM \
26628 mov %rsp, %rax; \
26629@@ -40,6 +41,7 @@ ENTRY(efi_call0)
26630 call *%rdi
26631 addq $32, %rsp
26632 RESTORE_XMM
26633+ pax_force_retaddr 0, 1
26634 ret
26635 ENDPROC(efi_call0)
26636
26637@@ -50,6 +52,7 @@ ENTRY(efi_call1)
26638 call *%rdi
26639 addq $32, %rsp
26640 RESTORE_XMM
26641+ pax_force_retaddr 0, 1
26642 ret
26643 ENDPROC(efi_call1)
26644
26645@@ -60,6 +63,7 @@ ENTRY(efi_call2)
26646 call *%rdi
26647 addq $32, %rsp
26648 RESTORE_XMM
26649+ pax_force_retaddr 0, 1
26650 ret
26651 ENDPROC(efi_call2)
26652
26653@@ -71,6 +75,7 @@ ENTRY(efi_call3)
26654 call *%rdi
26655 addq $32, %rsp
26656 RESTORE_XMM
26657+ pax_force_retaddr 0, 1
26658 ret
26659 ENDPROC(efi_call3)
26660
26661@@ -83,6 +88,7 @@ ENTRY(efi_call4)
26662 call *%rdi
26663 addq $32, %rsp
26664 RESTORE_XMM
26665+ pax_force_retaddr 0, 1
26666 ret
26667 ENDPROC(efi_call4)
26668
26669@@ -96,6 +102,7 @@ ENTRY(efi_call5)
26670 call *%rdi
26671 addq $48, %rsp
26672 RESTORE_XMM
26673+ pax_force_retaddr 0, 1
26674 ret
26675 ENDPROC(efi_call5)
26676
26677@@ -112,5 +119,6 @@ ENTRY(efi_call6)
26678 call *%rdi
26679 addq $48, %rsp
26680 RESTORE_XMM
26681+ pax_force_retaddr 0, 1
26682 ret
26683 ENDPROC(efi_call6)
26684diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
26685index e31bcd8..f12dc46 100644
26686--- a/arch/x86/platform/mrst/mrst.c
26687+++ b/arch/x86/platform/mrst/mrst.c
26688@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
26689 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
26690 int sfi_mrtc_num;
26691
26692-static void mrst_power_off(void)
26693+static __noreturn void mrst_power_off(void)
26694 {
26695+ BUG();
26696 }
26697
26698-static void mrst_reboot(void)
26699+static __noreturn void mrst_reboot(void)
26700 {
26701 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
26702+ BUG();
26703 }
26704
26705 /* parse all the mtimer info to a static mtimer array */
26706diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26707index 218cdb1..fd55c08 100644
26708--- a/arch/x86/power/cpu.c
26709+++ b/arch/x86/power/cpu.c
26710@@ -132,7 +132,7 @@ static void do_fpu_end(void)
26711 static void fix_processor_context(void)
26712 {
26713 int cpu = smp_processor_id();
26714- struct tss_struct *t = &per_cpu(init_tss, cpu);
26715+ struct tss_struct *t = init_tss + cpu;
26716
26717 set_tss_desc(cpu, t); /*
26718 * This just modifies memory; should not be
26719@@ -142,7 +142,9 @@ static void fix_processor_context(void)
26720 */
26721
26722 #ifdef CONFIG_X86_64
26723+ pax_open_kernel();
26724 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26725+ pax_close_kernel();
26726
26727 syscall_init(); /* This sets MSR_*STAR and related */
26728 #endif
26729diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
26730index b685296..0180fa9 100644
26731--- a/arch/x86/tools/relocs.c
26732+++ b/arch/x86/tools/relocs.c
26733@@ -12,10 +12,13 @@
26734 #include <regex.h>
26735 #include <tools/le_byteshift.h>
26736
26737+#include "../../../include/generated/autoconf.h"
26738+
26739 static void die(char *fmt, ...);
26740
26741 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
26742 static Elf32_Ehdr ehdr;
26743+static Elf32_Phdr *phdr;
26744 static unsigned long reloc_count, reloc_idx;
26745 static unsigned long *relocs;
26746 static unsigned long reloc16_count, reloc16_idx;
26747@@ -323,9 +326,39 @@ static void read_ehdr(FILE *fp)
26748 }
26749 }
26750
26751+static void read_phdrs(FILE *fp)
26752+{
26753+ unsigned int i;
26754+
26755+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
26756+ if (!phdr) {
26757+ die("Unable to allocate %d program headers\n",
26758+ ehdr.e_phnum);
26759+ }
26760+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
26761+ die("Seek to %d failed: %s\n",
26762+ ehdr.e_phoff, strerror(errno));
26763+ }
26764+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
26765+ die("Cannot read ELF program headers: %s\n",
26766+ strerror(errno));
26767+ }
26768+ for(i = 0; i < ehdr.e_phnum; i++) {
26769+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
26770+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
26771+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
26772+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
26773+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
26774+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
26775+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
26776+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
26777+ }
26778+
26779+}
26780+
26781 static void read_shdrs(FILE *fp)
26782 {
26783- int i;
26784+ unsigned int i;
26785 Elf32_Shdr shdr;
26786
26787 secs = calloc(ehdr.e_shnum, sizeof(struct section));
26788@@ -360,7 +393,7 @@ static void read_shdrs(FILE *fp)
26789
26790 static void read_strtabs(FILE *fp)
26791 {
26792- int i;
26793+ unsigned int i;
26794 for (i = 0; i < ehdr.e_shnum; i++) {
26795 struct section *sec = &secs[i];
26796 if (sec->shdr.sh_type != SHT_STRTAB) {
26797@@ -385,7 +418,7 @@ static void read_strtabs(FILE *fp)
26798
26799 static void read_symtabs(FILE *fp)
26800 {
26801- int i,j;
26802+ unsigned int i,j;
26803 for (i = 0; i < ehdr.e_shnum; i++) {
26804 struct section *sec = &secs[i];
26805 if (sec->shdr.sh_type != SHT_SYMTAB) {
26806@@ -418,7 +451,9 @@ static void read_symtabs(FILE *fp)
26807
26808 static void read_relocs(FILE *fp)
26809 {
26810- int i,j;
26811+ unsigned int i,j;
26812+ uint32_t base;
26813+
26814 for (i = 0; i < ehdr.e_shnum; i++) {
26815 struct section *sec = &secs[i];
26816 if (sec->shdr.sh_type != SHT_REL) {
26817@@ -438,9 +473,22 @@ static void read_relocs(FILE *fp)
26818 die("Cannot read symbol table: %s\n",
26819 strerror(errno));
26820 }
26821+ base = 0;
26822+
26823+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
26824+ for (j = 0; j < ehdr.e_phnum; j++) {
26825+ if (phdr[j].p_type != PT_LOAD )
26826+ continue;
26827+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
26828+ continue;
26829+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
26830+ break;
26831+ }
26832+#endif
26833+
26834 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
26835 Elf32_Rel *rel = &sec->reltab[j];
26836- rel->r_offset = elf32_to_cpu(rel->r_offset);
26837+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
26838 rel->r_info = elf32_to_cpu(rel->r_info);
26839 }
26840 }
26841@@ -449,13 +497,13 @@ static void read_relocs(FILE *fp)
26842
26843 static void print_absolute_symbols(void)
26844 {
26845- int i;
26846+ unsigned int i;
26847 printf("Absolute symbols\n");
26848 printf(" Num: Value Size Type Bind Visibility Name\n");
26849 for (i = 0; i < ehdr.e_shnum; i++) {
26850 struct section *sec = &secs[i];
26851 char *sym_strtab;
26852- int j;
26853+ unsigned int j;
26854
26855 if (sec->shdr.sh_type != SHT_SYMTAB) {
26856 continue;
26857@@ -482,7 +530,7 @@ static void print_absolute_symbols(void)
26858
26859 static void print_absolute_relocs(void)
26860 {
26861- int i, printed = 0;
26862+ unsigned int i, printed = 0;
26863
26864 for (i = 0; i < ehdr.e_shnum; i++) {
26865 struct section *sec = &secs[i];
26866@@ -551,7 +599,7 @@ static void print_absolute_relocs(void)
26867 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26868 int use_real_mode)
26869 {
26870- int i;
26871+ unsigned int i;
26872 /* Walk through the relocations */
26873 for (i = 0; i < ehdr.e_shnum; i++) {
26874 char *sym_strtab;
26875@@ -581,6 +629,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26876 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
26877 r_type = ELF32_R_TYPE(rel->r_info);
26878
26879+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
26880+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
26881+ continue;
26882+
26883+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
26884+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
26885+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
26886+ continue;
26887+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
26888+ continue;
26889+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
26890+ continue;
26891+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
26892+ continue;
26893+#endif
26894+
26895 shn_abs = sym->st_shndx == SHN_ABS;
26896
26897 switch (r_type) {
26898@@ -674,7 +738,7 @@ static int write32(unsigned int v, FILE *f)
26899
26900 static void emit_relocs(int as_text, int use_real_mode)
26901 {
26902- int i;
26903+ unsigned int i;
26904 /* Count how many relocations I have and allocate space for them. */
26905 reloc_count = 0;
26906 walk_relocs(count_reloc, use_real_mode);
26907@@ -801,6 +865,7 @@ int main(int argc, char **argv)
26908 fname, strerror(errno));
26909 }
26910 read_ehdr(fp);
26911+ read_phdrs(fp);
26912 read_shdrs(fp);
26913 read_strtabs(fp);
26914 read_symtabs(fp);
26915diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26916index fd14be1..e3c79c0 100644
26917--- a/arch/x86/vdso/Makefile
26918+++ b/arch/x86/vdso/Makefile
26919@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
26920 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
26921 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
26922
26923-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26924+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26925 GCOV_PROFILE := n
26926
26927 #
26928diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26929index 66e6d93..587f435 100644
26930--- a/arch/x86/vdso/vdso32-setup.c
26931+++ b/arch/x86/vdso/vdso32-setup.c
26932@@ -25,6 +25,7 @@
26933 #include <asm/tlbflush.h>
26934 #include <asm/vdso.h>
26935 #include <asm/proto.h>
26936+#include <asm/mman.h>
26937
26938 enum {
26939 VDSO_DISABLED = 0,
26940@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26941 void enable_sep_cpu(void)
26942 {
26943 int cpu = get_cpu();
26944- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26945+ struct tss_struct *tss = init_tss + cpu;
26946
26947 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26948 put_cpu();
26949@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26950 gate_vma.vm_start = FIXADDR_USER_START;
26951 gate_vma.vm_end = FIXADDR_USER_END;
26952 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26953- gate_vma.vm_page_prot = __P101;
26954+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26955
26956 return 0;
26957 }
26958@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26959 if (compat)
26960 addr = VDSO_HIGH_BASE;
26961 else {
26962- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26963+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26964 if (IS_ERR_VALUE(addr)) {
26965 ret = addr;
26966 goto up_fail;
26967 }
26968 }
26969
26970- current->mm->context.vdso = (void *)addr;
26971+ current->mm->context.vdso = addr;
26972
26973 if (compat_uses_vma || !compat) {
26974 /*
26975@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26976 }
26977
26978 current_thread_info()->sysenter_return =
26979- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26980+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26981
26982 up_fail:
26983 if (ret)
26984- current->mm->context.vdso = NULL;
26985+ current->mm->context.vdso = 0;
26986
26987 up_write(&mm->mmap_sem);
26988
26989@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
26990
26991 const char *arch_vma_name(struct vm_area_struct *vma)
26992 {
26993- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26994+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26995 return "[vdso]";
26996+
26997+#ifdef CONFIG_PAX_SEGMEXEC
26998+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26999+ return "[vdso]";
27000+#endif
27001+
27002 return NULL;
27003 }
27004
27005@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
27006 * Check to see if the corresponding task was created in compat vdso
27007 * mode.
27008 */
27009- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
27010+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
27011 return &gate_vma;
27012 return NULL;
27013 }
27014diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
27015index 00aaf04..4a26505 100644
27016--- a/arch/x86/vdso/vma.c
27017+++ b/arch/x86/vdso/vma.c
27018@@ -16,8 +16,6 @@
27019 #include <asm/vdso.h>
27020 #include <asm/page.h>
27021
27022-unsigned int __read_mostly vdso_enabled = 1;
27023-
27024 extern char vdso_start[], vdso_end[];
27025 extern unsigned short vdso_sync_cpuid;
27026
27027@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
27028 * unaligned here as a result of stack start randomization.
27029 */
27030 addr = PAGE_ALIGN(addr);
27031- addr = align_addr(addr, NULL, ALIGN_VDSO);
27032
27033 return addr;
27034 }
27035@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
27036 unsigned size)
27037 {
27038 struct mm_struct *mm = current->mm;
27039- unsigned long addr;
27040+ unsigned long addr = 0;
27041 int ret;
27042
27043- if (!vdso_enabled)
27044- return 0;
27045-
27046 down_write(&mm->mmap_sem);
27047+
27048+#ifdef CONFIG_PAX_RANDMMAP
27049+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27050+#endif
27051+
27052 addr = vdso_addr(mm->start_stack, size);
27053+ addr = align_addr(addr, NULL, ALIGN_VDSO);
27054 addr = get_unmapped_area(NULL, addr, size, 0, 0);
27055 if (IS_ERR_VALUE(addr)) {
27056 ret = addr;
27057 goto up_fail;
27058 }
27059
27060- current->mm->context.vdso = (void *)addr;
27061+ mm->context.vdso = addr;
27062
27063 ret = install_special_mapping(mm, addr, size,
27064 VM_READ|VM_EXEC|
27065 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
27066 pages);
27067- if (ret) {
27068- current->mm->context.vdso = NULL;
27069- goto up_fail;
27070- }
27071+ if (ret)
27072+ mm->context.vdso = 0;
27073
27074 up_fail:
27075 up_write(&mm->mmap_sem);
27076@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27077 vdsox32_size);
27078 }
27079 #endif
27080-
27081-static __init int vdso_setup(char *s)
27082-{
27083- vdso_enabled = simple_strtoul(s, NULL, 0);
27084- return 0;
27085-}
27086-__setup("vdso=", vdso_setup);
27087diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
27088index 6c7f1e8..de96944 100644
27089--- a/arch/x86/xen/enlighten.c
27090+++ b/arch/x86/xen/enlighten.c
27091@@ -95,8 +95,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
27092
27093 struct shared_info xen_dummy_shared_info;
27094
27095-void *xen_initial_gdt;
27096-
27097 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
27098 __read_mostly int xen_have_vector_callback;
27099 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
27100@@ -1157,30 +1155,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
27101 #endif
27102 };
27103
27104-static void xen_reboot(int reason)
27105+static __noreturn void xen_reboot(int reason)
27106 {
27107 struct sched_shutdown r = { .reason = reason };
27108
27109- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
27110- BUG();
27111+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
27112+ BUG();
27113 }
27114
27115-static void xen_restart(char *msg)
27116+static __noreturn void xen_restart(char *msg)
27117 {
27118 xen_reboot(SHUTDOWN_reboot);
27119 }
27120
27121-static void xen_emergency_restart(void)
27122+static __noreturn void xen_emergency_restart(void)
27123 {
27124 xen_reboot(SHUTDOWN_reboot);
27125 }
27126
27127-static void xen_machine_halt(void)
27128+static __noreturn void xen_machine_halt(void)
27129 {
27130 xen_reboot(SHUTDOWN_poweroff);
27131 }
27132
27133-static void xen_machine_power_off(void)
27134+static __noreturn void xen_machine_power_off(void)
27135 {
27136 if (pm_power_off)
27137 pm_power_off();
27138@@ -1283,7 +1281,17 @@ asmlinkage void __init xen_start_kernel(void)
27139 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
27140
27141 /* Work out if we support NX */
27142- x86_configure_nx();
27143+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27144+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
27145+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
27146+ unsigned l, h;
27147+
27148+ __supported_pte_mask |= _PAGE_NX;
27149+ rdmsr(MSR_EFER, l, h);
27150+ l |= EFER_NX;
27151+ wrmsr(MSR_EFER, l, h);
27152+ }
27153+#endif
27154
27155 xen_setup_features();
27156
27157@@ -1314,13 +1322,6 @@ asmlinkage void __init xen_start_kernel(void)
27158
27159 machine_ops = xen_machine_ops;
27160
27161- /*
27162- * The only reliable way to retain the initial address of the
27163- * percpu gdt_page is to remember it here, so we can go and
27164- * mark it RW later, when the initial percpu area is freed.
27165- */
27166- xen_initial_gdt = &per_cpu(gdt_page, 0);
27167-
27168 xen_smp_init();
27169
27170 #ifdef CONFIG_ACPI_NUMA
27171diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27172index 69f5857..0699dc5 100644
27173--- a/arch/x86/xen/mmu.c
27174+++ b/arch/x86/xen/mmu.c
27175@@ -1738,6 +1738,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27176 convert_pfn_mfn(init_level4_pgt);
27177 convert_pfn_mfn(level3_ident_pgt);
27178 convert_pfn_mfn(level3_kernel_pgt);
27179+ convert_pfn_mfn(level3_vmalloc_start_pgt);
27180+ convert_pfn_mfn(level3_vmalloc_end_pgt);
27181+ convert_pfn_mfn(level3_vmemmap_pgt);
27182
27183 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27184 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27185@@ -1756,7 +1759,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27186 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27187 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27188 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27189+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27190+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27191+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27192 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27193+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27194 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27195 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27196
27197@@ -1964,6 +1971,7 @@ static void __init xen_post_allocator_init(void)
27198 pv_mmu_ops.set_pud = xen_set_pud;
27199 #if PAGETABLE_LEVELS == 4
27200 pv_mmu_ops.set_pgd = xen_set_pgd;
27201+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27202 #endif
27203
27204 /* This will work as long as patching hasn't happened yet
27205@@ -2045,6 +2053,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
27206 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27207 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27208 .set_pgd = xen_set_pgd_hyper,
27209+ .set_pgd_batched = xen_set_pgd_hyper,
27210
27211 .alloc_pud = xen_alloc_pmd_init,
27212 .release_pud = xen_release_pmd_init,
27213diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27214index 0503c0c..ceb2d16 100644
27215--- a/arch/x86/xen/smp.c
27216+++ b/arch/x86/xen/smp.c
27217@@ -215,11 +215,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27218 {
27219 BUG_ON(smp_processor_id() != 0);
27220 native_smp_prepare_boot_cpu();
27221-
27222- /* We've switched to the "real" per-cpu gdt, so make sure the
27223- old memory can be recycled */
27224- make_lowmem_page_readwrite(xen_initial_gdt);
27225-
27226 xen_filter_cpu_maps();
27227 xen_setup_vcpu_info_placement();
27228 }
27229@@ -296,12 +291,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27230 gdt = get_cpu_gdt_table(cpu);
27231
27232 ctxt->flags = VGCF_IN_KERNEL;
27233- ctxt->user_regs.ds = __USER_DS;
27234- ctxt->user_regs.es = __USER_DS;
27235+ ctxt->user_regs.ds = __KERNEL_DS;
27236+ ctxt->user_regs.es = __KERNEL_DS;
27237 ctxt->user_regs.ss = __KERNEL_DS;
27238 #ifdef CONFIG_X86_32
27239 ctxt->user_regs.fs = __KERNEL_PERCPU;
27240- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27241+ savesegment(gs, ctxt->user_regs.gs);
27242 #else
27243 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27244 #endif
27245@@ -352,13 +347,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27246 int rc;
27247
27248 per_cpu(current_task, cpu) = idle;
27249+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27250 #ifdef CONFIG_X86_32
27251 irq_ctx_init(cpu);
27252 #else
27253 clear_tsk_thread_flag(idle, TIF_FORK);
27254- per_cpu(kernel_stack, cpu) =
27255- (unsigned long)task_stack_page(idle) -
27256- KERNEL_STACK_OFFSET + THREAD_SIZE;
27257+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27258 #endif
27259 xen_setup_runstate_info(cpu);
27260 xen_setup_timer(cpu);
27261diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27262index b040b0e..8cc4fe0 100644
27263--- a/arch/x86/xen/xen-asm_32.S
27264+++ b/arch/x86/xen/xen-asm_32.S
27265@@ -83,14 +83,14 @@ ENTRY(xen_iret)
27266 ESP_OFFSET=4 # bytes pushed onto stack
27267
27268 /*
27269- * Store vcpu_info pointer for easy access. Do it this way to
27270- * avoid having to reload %fs
27271+ * Store vcpu_info pointer for easy access.
27272 */
27273 #ifdef CONFIG_SMP
27274- GET_THREAD_INFO(%eax)
27275- movl TI_cpu(%eax), %eax
27276- movl __per_cpu_offset(,%eax,4), %eax
27277- mov xen_vcpu(%eax), %eax
27278+ push %fs
27279+ mov $(__KERNEL_PERCPU), %eax
27280+ mov %eax, %fs
27281+ mov PER_CPU_VAR(xen_vcpu), %eax
27282+ pop %fs
27283 #else
27284 movl xen_vcpu, %eax
27285 #endif
27286diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27287index aaa7291..3f77960 100644
27288--- a/arch/x86/xen/xen-head.S
27289+++ b/arch/x86/xen/xen-head.S
27290@@ -19,6 +19,17 @@ ENTRY(startup_xen)
27291 #ifdef CONFIG_X86_32
27292 mov %esi,xen_start_info
27293 mov $init_thread_union+THREAD_SIZE,%esp
27294+#ifdef CONFIG_SMP
27295+ movl $cpu_gdt_table,%edi
27296+ movl $__per_cpu_load,%eax
27297+ movw %ax,__KERNEL_PERCPU + 2(%edi)
27298+ rorl $16,%eax
27299+ movb %al,__KERNEL_PERCPU + 4(%edi)
27300+ movb %ah,__KERNEL_PERCPU + 7(%edi)
27301+ movl $__per_cpu_end - 1,%eax
27302+ subl $__per_cpu_start,%eax
27303+ movw %ax,__KERNEL_PERCPU + 0(%edi)
27304+#endif
27305 #else
27306 mov %rsi,xen_start_info
27307 mov $init_thread_union+THREAD_SIZE,%rsp
27308diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27309index b095739..8c17bcd 100644
27310--- a/arch/x86/xen/xen-ops.h
27311+++ b/arch/x86/xen/xen-ops.h
27312@@ -10,8 +10,6 @@
27313 extern const char xen_hypervisor_callback[];
27314 extern const char xen_failsafe_callback[];
27315
27316-extern void *xen_initial_gdt;
27317-
27318 struct trap_info;
27319 void xen_copy_trap_info(struct trap_info *traps);
27320
27321diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
27322index 525bd3d..ef888b1 100644
27323--- a/arch/xtensa/variants/dc232b/include/variant/core.h
27324+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
27325@@ -119,9 +119,9 @@
27326 ----------------------------------------------------------------------*/
27327
27328 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
27329-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
27330 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
27331 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
27332+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27333
27334 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
27335 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
27336diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
27337index 2f33760..835e50a 100644
27338--- a/arch/xtensa/variants/fsf/include/variant/core.h
27339+++ b/arch/xtensa/variants/fsf/include/variant/core.h
27340@@ -11,6 +11,7 @@
27341 #ifndef _XTENSA_CORE_H
27342 #define _XTENSA_CORE_H
27343
27344+#include <linux/const.h>
27345
27346 /****************************************************************************
27347 Parameters Useful for Any Code, USER or PRIVILEGED
27348@@ -112,9 +113,9 @@
27349 ----------------------------------------------------------------------*/
27350
27351 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27352-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27353 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27354 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27355+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27356
27357 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
27358 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
27359diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
27360index af00795..2bb8105 100644
27361--- a/arch/xtensa/variants/s6000/include/variant/core.h
27362+++ b/arch/xtensa/variants/s6000/include/variant/core.h
27363@@ -11,6 +11,7 @@
27364 #ifndef _XTENSA_CORE_CONFIGURATION_H
27365 #define _XTENSA_CORE_CONFIGURATION_H
27366
27367+#include <linux/const.h>
27368
27369 /****************************************************************************
27370 Parameters Useful for Any Code, USER or PRIVILEGED
27371@@ -118,9 +119,9 @@
27372 ----------------------------------------------------------------------*/
27373
27374 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27375-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27376 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27377 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27378+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27379
27380 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
27381 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
27382diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27383index 58916af..9cb880b 100644
27384--- a/block/blk-iopoll.c
27385+++ b/block/blk-iopoll.c
27386@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27387 }
27388 EXPORT_SYMBOL(blk_iopoll_complete);
27389
27390-static void blk_iopoll_softirq(struct softirq_action *h)
27391+static void blk_iopoll_softirq(void)
27392 {
27393 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27394 int rearm = 0, budget = blk_iopoll_budget;
27395diff --git a/block/blk-map.c b/block/blk-map.c
27396index 623e1cd..ca1e109 100644
27397--- a/block/blk-map.c
27398+++ b/block/blk-map.c
27399@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27400 if (!len || !kbuf)
27401 return -EINVAL;
27402
27403- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
27404+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
27405 if (do_copy)
27406 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27407 else
27408diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27409index 467c8de..4bddc6d 100644
27410--- a/block/blk-softirq.c
27411+++ b/block/blk-softirq.c
27412@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27413 * Softirq action handler - move entries to local list and loop over them
27414 * while passing them to the queue registered handler.
27415 */
27416-static void blk_done_softirq(struct softirq_action *h)
27417+static void blk_done_softirq(void)
27418 {
27419 struct list_head *cpu_list, local_list;
27420
27421diff --git a/block/bsg.c b/block/bsg.c
27422index ff64ae3..593560c 100644
27423--- a/block/bsg.c
27424+++ b/block/bsg.c
27425@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27426 struct sg_io_v4 *hdr, struct bsg_device *bd,
27427 fmode_t has_write_perm)
27428 {
27429+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27430+ unsigned char *cmdptr;
27431+
27432 if (hdr->request_len > BLK_MAX_CDB) {
27433 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27434 if (!rq->cmd)
27435 return -ENOMEM;
27436- }
27437+ cmdptr = rq->cmd;
27438+ } else
27439+ cmdptr = tmpcmd;
27440
27441- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
27442+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27443 hdr->request_len))
27444 return -EFAULT;
27445
27446+ if (cmdptr != rq->cmd)
27447+ memcpy(rq->cmd, cmdptr, hdr->request_len);
27448+
27449 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27450 if (blk_verify_command(rq->cmd, has_write_perm))
27451 return -EPERM;
27452diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27453index 7c668c8..db3521c 100644
27454--- a/block/compat_ioctl.c
27455+++ b/block/compat_ioctl.c
27456@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27457 err |= __get_user(f->spec1, &uf->spec1);
27458 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27459 err |= __get_user(name, &uf->name);
27460- f->name = compat_ptr(name);
27461+ f->name = (void __force_kernel *)compat_ptr(name);
27462 if (err) {
27463 err = -EFAULT;
27464 goto out;
27465diff --git a/block/partitions/efi.c b/block/partitions/efi.c
27466index 6296b40..417c00f 100644
27467--- a/block/partitions/efi.c
27468+++ b/block/partitions/efi.c
27469@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
27470 if (!gpt)
27471 return NULL;
27472
27473+ if (!le32_to_cpu(gpt->num_partition_entries))
27474+ return NULL;
27475+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
27476+ if (!pte)
27477+ return NULL;
27478+
27479 count = le32_to_cpu(gpt->num_partition_entries) *
27480 le32_to_cpu(gpt->sizeof_partition_entry);
27481- if (!count)
27482- return NULL;
27483- pte = kzalloc(count, GFP_KERNEL);
27484- if (!pte)
27485- return NULL;
27486-
27487 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
27488 (u8 *) pte,
27489 count) < count) {
27490diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27491index 260fa80..e8f3caf 100644
27492--- a/block/scsi_ioctl.c
27493+++ b/block/scsi_ioctl.c
27494@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
27495 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27496 struct sg_io_hdr *hdr, fmode_t mode)
27497 {
27498- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27499+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27500+ unsigned char *cmdptr;
27501+
27502+ if (rq->cmd != rq->__cmd)
27503+ cmdptr = rq->cmd;
27504+ else
27505+ cmdptr = tmpcmd;
27506+
27507+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27508 return -EFAULT;
27509+
27510+ if (cmdptr != rq->cmd)
27511+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27512+
27513 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27514 return -EPERM;
27515
27516@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27517 int err;
27518 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27519 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27520+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27521+ unsigned char *cmdptr;
27522
27523 if (!sic)
27524 return -EINVAL;
27525@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27526 */
27527 err = -EFAULT;
27528 rq->cmd_len = cmdlen;
27529- if (copy_from_user(rq->cmd, sic->data, cmdlen))
27530+
27531+ if (rq->cmd != rq->__cmd)
27532+ cmdptr = rq->cmd;
27533+ else
27534+ cmdptr = tmpcmd;
27535+
27536+ if (copy_from_user(cmdptr, sic->data, cmdlen))
27537 goto error;
27538
27539+ if (rq->cmd != cmdptr)
27540+ memcpy(rq->cmd, cmdptr, cmdlen);
27541+
27542 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27543 goto error;
27544
27545diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27546index 671d4d6..5f24030 100644
27547--- a/crypto/cryptd.c
27548+++ b/crypto/cryptd.c
27549@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
27550
27551 struct cryptd_blkcipher_request_ctx {
27552 crypto_completion_t complete;
27553-};
27554+} __no_const;
27555
27556 struct cryptd_hash_ctx {
27557 struct crypto_shash *child;
27558@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
27559
27560 struct cryptd_aead_request_ctx {
27561 crypto_completion_t complete;
27562-};
27563+} __no_const;
27564
27565 static void cryptd_queue_worker(struct work_struct *work);
27566
27567diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
27568index e6defd8..c26a225 100644
27569--- a/drivers/acpi/apei/cper.c
27570+++ b/drivers/acpi/apei/cper.c
27571@@ -38,12 +38,12 @@
27572 */
27573 u64 cper_next_record_id(void)
27574 {
27575- static atomic64_t seq;
27576+ static atomic64_unchecked_t seq;
27577
27578- if (!atomic64_read(&seq))
27579- atomic64_set(&seq, ((u64)get_seconds()) << 32);
27580+ if (!atomic64_read_unchecked(&seq))
27581+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
27582
27583- return atomic64_inc_return(&seq);
27584+ return atomic64_inc_return_unchecked(&seq);
27585 }
27586 EXPORT_SYMBOL_GPL(cper_next_record_id);
27587
27588diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
27589index 7586544..636a2f0 100644
27590--- a/drivers/acpi/ec_sys.c
27591+++ b/drivers/acpi/ec_sys.c
27592@@ -12,6 +12,7 @@
27593 #include <linux/acpi.h>
27594 #include <linux/debugfs.h>
27595 #include <linux/module.h>
27596+#include <linux/uaccess.h>
27597 #include "internal.h"
27598
27599 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
27600@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27601 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
27602 */
27603 unsigned int size = EC_SPACE_SIZE;
27604- u8 *data = (u8 *) buf;
27605+ u8 data;
27606 loff_t init_off = *off;
27607 int err = 0;
27608
27609@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27610 size = count;
27611
27612 while (size) {
27613- err = ec_read(*off, &data[*off - init_off]);
27614+ err = ec_read(*off, &data);
27615 if (err)
27616 return err;
27617+ if (put_user(data, &buf[*off - init_off]))
27618+ return -EFAULT;
27619 *off += 1;
27620 size--;
27621 }
27622@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27623
27624 unsigned int size = count;
27625 loff_t init_off = *off;
27626- u8 *data = (u8 *) buf;
27627 int err = 0;
27628
27629 if (*off >= EC_SPACE_SIZE)
27630@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27631 }
27632
27633 while (size) {
27634- u8 byte_write = data[*off - init_off];
27635+ u8 byte_write;
27636+ if (get_user(byte_write, &buf[*off - init_off]))
27637+ return -EFAULT;
27638 err = ec_write(*off, byte_write);
27639 if (err)
27640 return err;
27641diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27642index 251c7b62..000462d 100644
27643--- a/drivers/acpi/proc.c
27644+++ b/drivers/acpi/proc.c
27645@@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
27646 size_t count, loff_t * ppos)
27647 {
27648 struct list_head *node, *next;
27649- char strbuf[5];
27650- char str[5] = "";
27651- unsigned int len = count;
27652+ char strbuf[5] = {0};
27653
27654- if (len > 4)
27655- len = 4;
27656- if (len < 0)
27657+ if (count > 4)
27658+ count = 4;
27659+ if (copy_from_user(strbuf, buffer, count))
27660 return -EFAULT;
27661-
27662- if (copy_from_user(strbuf, buffer, len))
27663- return -EFAULT;
27664- strbuf[len] = '\0';
27665- sscanf(strbuf, "%s", str);
27666+ strbuf[count] = '\0';
27667
27668 mutex_lock(&acpi_device_lock);
27669 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27670@@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
27671 if (!dev->wakeup.flags.valid)
27672 continue;
27673
27674- if (!strncmp(dev->pnp.bus_id, str, 4)) {
27675+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27676 if (device_can_wakeup(&dev->dev)) {
27677 bool enable = !device_may_wakeup(&dev->dev);
27678 device_set_wakeup_enable(&dev->dev, enable);
27679diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
27680index 0734086..3ad3e4c 100644
27681--- a/drivers/acpi/processor_driver.c
27682+++ b/drivers/acpi/processor_driver.c
27683@@ -556,7 +556,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27684 return 0;
27685 #endif
27686
27687- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27688+ BUG_ON(pr->id >= nr_cpu_ids);
27689
27690 /*
27691 * Buggy BIOS check
27692diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27693index d31ee55..8363a8b 100644
27694--- a/drivers/ata/libata-core.c
27695+++ b/drivers/ata/libata-core.c
27696@@ -4742,7 +4742,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27697 struct ata_port *ap;
27698 unsigned int tag;
27699
27700- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27701+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27702 ap = qc->ap;
27703
27704 qc->flags = 0;
27705@@ -4758,7 +4758,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27706 struct ata_port *ap;
27707 struct ata_link *link;
27708
27709- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27710+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27711 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27712 ap = qc->ap;
27713 link = qc->dev->link;
27714@@ -5822,6 +5822,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27715 return;
27716
27717 spin_lock(&lock);
27718+ pax_open_kernel();
27719
27720 for (cur = ops->inherits; cur; cur = cur->inherits) {
27721 void **inherit = (void **)cur;
27722@@ -5835,8 +5836,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27723 if (IS_ERR(*pp))
27724 *pp = NULL;
27725
27726- ops->inherits = NULL;
27727+ *(struct ata_port_operations **)&ops->inherits = NULL;
27728
27729+ pax_close_kernel();
27730 spin_unlock(&lock);
27731 }
27732
27733diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
27734index 3239517..343b5f6 100644
27735--- a/drivers/ata/pata_arasan_cf.c
27736+++ b/drivers/ata/pata_arasan_cf.c
27737@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
27738 /* Handle platform specific quirks */
27739 if (pdata->quirk) {
27740 if (pdata->quirk & CF_BROKEN_PIO) {
27741- ap->ops->set_piomode = NULL;
27742+ pax_open_kernel();
27743+ *(void **)&ap->ops->set_piomode = NULL;
27744+ pax_close_kernel();
27745 ap->pio_mask = 0;
27746 }
27747 if (pdata->quirk & CF_BROKEN_MWDMA)
27748diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
27749index f9b983a..887b9d8 100644
27750--- a/drivers/atm/adummy.c
27751+++ b/drivers/atm/adummy.c
27752@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
27753 vcc->pop(vcc, skb);
27754 else
27755 dev_kfree_skb_any(skb);
27756- atomic_inc(&vcc->stats->tx);
27757+ atomic_inc_unchecked(&vcc->stats->tx);
27758
27759 return 0;
27760 }
27761diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
27762index f8f41e0..1f987dd 100644
27763--- a/drivers/atm/ambassador.c
27764+++ b/drivers/atm/ambassador.c
27765@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
27766 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
27767
27768 // VC layer stats
27769- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27770+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27771
27772 // free the descriptor
27773 kfree (tx_descr);
27774@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27775 dump_skb ("<<<", vc, skb);
27776
27777 // VC layer stats
27778- atomic_inc(&atm_vcc->stats->rx);
27779+ atomic_inc_unchecked(&atm_vcc->stats->rx);
27780 __net_timestamp(skb);
27781 // end of our responsibility
27782 atm_vcc->push (atm_vcc, skb);
27783@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27784 } else {
27785 PRINTK (KERN_INFO, "dropped over-size frame");
27786 // should we count this?
27787- atomic_inc(&atm_vcc->stats->rx_drop);
27788+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27789 }
27790
27791 } else {
27792@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
27793 }
27794
27795 if (check_area (skb->data, skb->len)) {
27796- atomic_inc(&atm_vcc->stats->tx_err);
27797+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
27798 return -ENOMEM; // ?
27799 }
27800
27801diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
27802index b22d71c..d6e1049 100644
27803--- a/drivers/atm/atmtcp.c
27804+++ b/drivers/atm/atmtcp.c
27805@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27806 if (vcc->pop) vcc->pop(vcc,skb);
27807 else dev_kfree_skb(skb);
27808 if (dev_data) return 0;
27809- atomic_inc(&vcc->stats->tx_err);
27810+ atomic_inc_unchecked(&vcc->stats->tx_err);
27811 return -ENOLINK;
27812 }
27813 size = skb->len+sizeof(struct atmtcp_hdr);
27814@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27815 if (!new_skb) {
27816 if (vcc->pop) vcc->pop(vcc,skb);
27817 else dev_kfree_skb(skb);
27818- atomic_inc(&vcc->stats->tx_err);
27819+ atomic_inc_unchecked(&vcc->stats->tx_err);
27820 return -ENOBUFS;
27821 }
27822 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
27823@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27824 if (vcc->pop) vcc->pop(vcc,skb);
27825 else dev_kfree_skb(skb);
27826 out_vcc->push(out_vcc,new_skb);
27827- atomic_inc(&vcc->stats->tx);
27828- atomic_inc(&out_vcc->stats->rx);
27829+ atomic_inc_unchecked(&vcc->stats->tx);
27830+ atomic_inc_unchecked(&out_vcc->stats->rx);
27831 return 0;
27832 }
27833
27834@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27835 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
27836 read_unlock(&vcc_sklist_lock);
27837 if (!out_vcc) {
27838- atomic_inc(&vcc->stats->tx_err);
27839+ atomic_inc_unchecked(&vcc->stats->tx_err);
27840 goto done;
27841 }
27842 skb_pull(skb,sizeof(struct atmtcp_hdr));
27843@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27844 __net_timestamp(new_skb);
27845 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
27846 out_vcc->push(out_vcc,new_skb);
27847- atomic_inc(&vcc->stats->tx);
27848- atomic_inc(&out_vcc->stats->rx);
27849+ atomic_inc_unchecked(&vcc->stats->tx);
27850+ atomic_inc_unchecked(&out_vcc->stats->rx);
27851 done:
27852 if (vcc->pop) vcc->pop(vcc,skb);
27853 else dev_kfree_skb(skb);
27854diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
27855index 2059ee4..faf51c7 100644
27856--- a/drivers/atm/eni.c
27857+++ b/drivers/atm/eni.c
27858@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
27859 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
27860 vcc->dev->number);
27861 length = 0;
27862- atomic_inc(&vcc->stats->rx_err);
27863+ atomic_inc_unchecked(&vcc->stats->rx_err);
27864 }
27865 else {
27866 length = ATM_CELL_SIZE-1; /* no HEC */
27867@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27868 size);
27869 }
27870 eff = length = 0;
27871- atomic_inc(&vcc->stats->rx_err);
27872+ atomic_inc_unchecked(&vcc->stats->rx_err);
27873 }
27874 else {
27875 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
27876@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27877 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
27878 vcc->dev->number,vcc->vci,length,size << 2,descr);
27879 length = eff = 0;
27880- atomic_inc(&vcc->stats->rx_err);
27881+ atomic_inc_unchecked(&vcc->stats->rx_err);
27882 }
27883 }
27884 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
27885@@ -767,7 +767,7 @@ rx_dequeued++;
27886 vcc->push(vcc,skb);
27887 pushed++;
27888 }
27889- atomic_inc(&vcc->stats->rx);
27890+ atomic_inc_unchecked(&vcc->stats->rx);
27891 }
27892 wake_up(&eni_dev->rx_wait);
27893 }
27894@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
27895 PCI_DMA_TODEVICE);
27896 if (vcc->pop) vcc->pop(vcc,skb);
27897 else dev_kfree_skb_irq(skb);
27898- atomic_inc(&vcc->stats->tx);
27899+ atomic_inc_unchecked(&vcc->stats->tx);
27900 wake_up(&eni_dev->tx_wait);
27901 dma_complete++;
27902 }
27903@@ -1567,7 +1567,7 @@ tx_complete++;
27904 /*--------------------------------- entries ---------------------------------*/
27905
27906
27907-static const char *media_name[] __devinitdata = {
27908+static const char *media_name[] __devinitconst = {
27909 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
27910 "UTP", "05?", "06?", "07?", /* 4- 7 */
27911 "TAXI","09?", "10?", "11?", /* 8-11 */
27912diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
27913index 86fed1b..6dc4721 100644
27914--- a/drivers/atm/firestream.c
27915+++ b/drivers/atm/firestream.c
27916@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
27917 }
27918 }
27919
27920- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27921+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27922
27923 fs_dprintk (FS_DEBUG_TXMEM, "i");
27924 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
27925@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27926 #endif
27927 skb_put (skb, qe->p1 & 0xffff);
27928 ATM_SKB(skb)->vcc = atm_vcc;
27929- atomic_inc(&atm_vcc->stats->rx);
27930+ atomic_inc_unchecked(&atm_vcc->stats->rx);
27931 __net_timestamp(skb);
27932 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
27933 atm_vcc->push (atm_vcc, skb);
27934@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27935 kfree (pe);
27936 }
27937 if (atm_vcc)
27938- atomic_inc(&atm_vcc->stats->rx_drop);
27939+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27940 break;
27941 case 0x1f: /* Reassembly abort: no buffers. */
27942 /* Silently increment error counter. */
27943 if (atm_vcc)
27944- atomic_inc(&atm_vcc->stats->rx_drop);
27945+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27946 break;
27947 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
27948 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
27949diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
27950index 361f5ae..7fc552d 100644
27951--- a/drivers/atm/fore200e.c
27952+++ b/drivers/atm/fore200e.c
27953@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
27954 #endif
27955 /* check error condition */
27956 if (*entry->status & STATUS_ERROR)
27957- atomic_inc(&vcc->stats->tx_err);
27958+ atomic_inc_unchecked(&vcc->stats->tx_err);
27959 else
27960- atomic_inc(&vcc->stats->tx);
27961+ atomic_inc_unchecked(&vcc->stats->tx);
27962 }
27963 }
27964
27965@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
27966 if (skb == NULL) {
27967 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
27968
27969- atomic_inc(&vcc->stats->rx_drop);
27970+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27971 return -ENOMEM;
27972 }
27973
27974@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
27975
27976 dev_kfree_skb_any(skb);
27977
27978- atomic_inc(&vcc->stats->rx_drop);
27979+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27980 return -ENOMEM;
27981 }
27982
27983 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27984
27985 vcc->push(vcc, skb);
27986- atomic_inc(&vcc->stats->rx);
27987+ atomic_inc_unchecked(&vcc->stats->rx);
27988
27989 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27990
27991@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
27992 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
27993 fore200e->atm_dev->number,
27994 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
27995- atomic_inc(&vcc->stats->rx_err);
27996+ atomic_inc_unchecked(&vcc->stats->rx_err);
27997 }
27998 }
27999
28000@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
28001 goto retry_here;
28002 }
28003
28004- atomic_inc(&vcc->stats->tx_err);
28005+ atomic_inc_unchecked(&vcc->stats->tx_err);
28006
28007 fore200e->tx_sat++;
28008 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
28009diff --git a/drivers/atm/he.c b/drivers/atm/he.c
28010index b182c2f..1c6fa8a 100644
28011--- a/drivers/atm/he.c
28012+++ b/drivers/atm/he.c
28013@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28014
28015 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
28016 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
28017- atomic_inc(&vcc->stats->rx_drop);
28018+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28019 goto return_host_buffers;
28020 }
28021
28022@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28023 RBRQ_LEN_ERR(he_dev->rbrq_head)
28024 ? "LEN_ERR" : "",
28025 vcc->vpi, vcc->vci);
28026- atomic_inc(&vcc->stats->rx_err);
28027+ atomic_inc_unchecked(&vcc->stats->rx_err);
28028 goto return_host_buffers;
28029 }
28030
28031@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28032 vcc->push(vcc, skb);
28033 spin_lock(&he_dev->global_lock);
28034
28035- atomic_inc(&vcc->stats->rx);
28036+ atomic_inc_unchecked(&vcc->stats->rx);
28037
28038 return_host_buffers:
28039 ++pdus_assembled;
28040@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
28041 tpd->vcc->pop(tpd->vcc, tpd->skb);
28042 else
28043 dev_kfree_skb_any(tpd->skb);
28044- atomic_inc(&tpd->vcc->stats->tx_err);
28045+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
28046 }
28047 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
28048 return;
28049@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28050 vcc->pop(vcc, skb);
28051 else
28052 dev_kfree_skb_any(skb);
28053- atomic_inc(&vcc->stats->tx_err);
28054+ atomic_inc_unchecked(&vcc->stats->tx_err);
28055 return -EINVAL;
28056 }
28057
28058@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28059 vcc->pop(vcc, skb);
28060 else
28061 dev_kfree_skb_any(skb);
28062- atomic_inc(&vcc->stats->tx_err);
28063+ atomic_inc_unchecked(&vcc->stats->tx_err);
28064 return -EINVAL;
28065 }
28066 #endif
28067@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28068 vcc->pop(vcc, skb);
28069 else
28070 dev_kfree_skb_any(skb);
28071- atomic_inc(&vcc->stats->tx_err);
28072+ atomic_inc_unchecked(&vcc->stats->tx_err);
28073 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28074 return -ENOMEM;
28075 }
28076@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28077 vcc->pop(vcc, skb);
28078 else
28079 dev_kfree_skb_any(skb);
28080- atomic_inc(&vcc->stats->tx_err);
28081+ atomic_inc_unchecked(&vcc->stats->tx_err);
28082 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28083 return -ENOMEM;
28084 }
28085@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28086 __enqueue_tpd(he_dev, tpd, cid);
28087 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28088
28089- atomic_inc(&vcc->stats->tx);
28090+ atomic_inc_unchecked(&vcc->stats->tx);
28091
28092 return 0;
28093 }
28094diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
28095index 75fd691..2d20b14 100644
28096--- a/drivers/atm/horizon.c
28097+++ b/drivers/atm/horizon.c
28098@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
28099 {
28100 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
28101 // VC layer stats
28102- atomic_inc(&vcc->stats->rx);
28103+ atomic_inc_unchecked(&vcc->stats->rx);
28104 __net_timestamp(skb);
28105 // end of our responsibility
28106 vcc->push (vcc, skb);
28107@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
28108 dev->tx_iovec = NULL;
28109
28110 // VC layer stats
28111- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28112+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
28113
28114 // free the skb
28115 hrz_kfree_skb (skb);
28116diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
28117index 1c05212..c28e200 100644
28118--- a/drivers/atm/idt77252.c
28119+++ b/drivers/atm/idt77252.c
28120@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
28121 else
28122 dev_kfree_skb(skb);
28123
28124- atomic_inc(&vcc->stats->tx);
28125+ atomic_inc_unchecked(&vcc->stats->tx);
28126 }
28127
28128 atomic_dec(&scq->used);
28129@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28130 if ((sb = dev_alloc_skb(64)) == NULL) {
28131 printk("%s: Can't allocate buffers for aal0.\n",
28132 card->name);
28133- atomic_add(i, &vcc->stats->rx_drop);
28134+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
28135 break;
28136 }
28137 if (!atm_charge(vcc, sb->truesize)) {
28138 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
28139 card->name);
28140- atomic_add(i - 1, &vcc->stats->rx_drop);
28141+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
28142 dev_kfree_skb(sb);
28143 break;
28144 }
28145@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28146 ATM_SKB(sb)->vcc = vcc;
28147 __net_timestamp(sb);
28148 vcc->push(vcc, sb);
28149- atomic_inc(&vcc->stats->rx);
28150+ atomic_inc_unchecked(&vcc->stats->rx);
28151
28152 cell += ATM_CELL_PAYLOAD;
28153 }
28154@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28155 "(CDC: %08x)\n",
28156 card->name, len, rpp->len, readl(SAR_REG_CDC));
28157 recycle_rx_pool_skb(card, rpp);
28158- atomic_inc(&vcc->stats->rx_err);
28159+ atomic_inc_unchecked(&vcc->stats->rx_err);
28160 return;
28161 }
28162 if (stat & SAR_RSQE_CRC) {
28163 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
28164 recycle_rx_pool_skb(card, rpp);
28165- atomic_inc(&vcc->stats->rx_err);
28166+ atomic_inc_unchecked(&vcc->stats->rx_err);
28167 return;
28168 }
28169 if (skb_queue_len(&rpp->queue) > 1) {
28170@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28171 RXPRINTK("%s: Can't alloc RX skb.\n",
28172 card->name);
28173 recycle_rx_pool_skb(card, rpp);
28174- atomic_inc(&vcc->stats->rx_err);
28175+ atomic_inc_unchecked(&vcc->stats->rx_err);
28176 return;
28177 }
28178 if (!atm_charge(vcc, skb->truesize)) {
28179@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28180 __net_timestamp(skb);
28181
28182 vcc->push(vcc, skb);
28183- atomic_inc(&vcc->stats->rx);
28184+ atomic_inc_unchecked(&vcc->stats->rx);
28185
28186 return;
28187 }
28188@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28189 __net_timestamp(skb);
28190
28191 vcc->push(vcc, skb);
28192- atomic_inc(&vcc->stats->rx);
28193+ atomic_inc_unchecked(&vcc->stats->rx);
28194
28195 if (skb->truesize > SAR_FB_SIZE_3)
28196 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
28197@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
28198 if (vcc->qos.aal != ATM_AAL0) {
28199 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
28200 card->name, vpi, vci);
28201- atomic_inc(&vcc->stats->rx_drop);
28202+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28203 goto drop;
28204 }
28205
28206 if ((sb = dev_alloc_skb(64)) == NULL) {
28207 printk("%s: Can't allocate buffers for AAL0.\n",
28208 card->name);
28209- atomic_inc(&vcc->stats->rx_err);
28210+ atomic_inc_unchecked(&vcc->stats->rx_err);
28211 goto drop;
28212 }
28213
28214@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
28215 ATM_SKB(sb)->vcc = vcc;
28216 __net_timestamp(sb);
28217 vcc->push(vcc, sb);
28218- atomic_inc(&vcc->stats->rx);
28219+ atomic_inc_unchecked(&vcc->stats->rx);
28220
28221 drop:
28222 skb_pull(queue, 64);
28223@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28224
28225 if (vc == NULL) {
28226 printk("%s: NULL connection in send().\n", card->name);
28227- atomic_inc(&vcc->stats->tx_err);
28228+ atomic_inc_unchecked(&vcc->stats->tx_err);
28229 dev_kfree_skb(skb);
28230 return -EINVAL;
28231 }
28232 if (!test_bit(VCF_TX, &vc->flags)) {
28233 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
28234- atomic_inc(&vcc->stats->tx_err);
28235+ atomic_inc_unchecked(&vcc->stats->tx_err);
28236 dev_kfree_skb(skb);
28237 return -EINVAL;
28238 }
28239@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28240 break;
28241 default:
28242 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
28243- atomic_inc(&vcc->stats->tx_err);
28244+ atomic_inc_unchecked(&vcc->stats->tx_err);
28245 dev_kfree_skb(skb);
28246 return -EINVAL;
28247 }
28248
28249 if (skb_shinfo(skb)->nr_frags != 0) {
28250 printk("%s: No scatter-gather yet.\n", card->name);
28251- atomic_inc(&vcc->stats->tx_err);
28252+ atomic_inc_unchecked(&vcc->stats->tx_err);
28253 dev_kfree_skb(skb);
28254 return -EINVAL;
28255 }
28256@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28257
28258 err = queue_skb(card, vc, skb, oam);
28259 if (err) {
28260- atomic_inc(&vcc->stats->tx_err);
28261+ atomic_inc_unchecked(&vcc->stats->tx_err);
28262 dev_kfree_skb(skb);
28263 return err;
28264 }
28265@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
28266 skb = dev_alloc_skb(64);
28267 if (!skb) {
28268 printk("%s: Out of memory in send_oam().\n", card->name);
28269- atomic_inc(&vcc->stats->tx_err);
28270+ atomic_inc_unchecked(&vcc->stats->tx_err);
28271 return -ENOMEM;
28272 }
28273 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
28274diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
28275index d438601..8b98495 100644
28276--- a/drivers/atm/iphase.c
28277+++ b/drivers/atm/iphase.c
28278@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
28279 status = (u_short) (buf_desc_ptr->desc_mode);
28280 if (status & (RX_CER | RX_PTE | RX_OFL))
28281 {
28282- atomic_inc(&vcc->stats->rx_err);
28283+ atomic_inc_unchecked(&vcc->stats->rx_err);
28284 IF_ERR(printk("IA: bad packet, dropping it");)
28285 if (status & RX_CER) {
28286 IF_ERR(printk(" cause: packet CRC error\n");)
28287@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
28288 len = dma_addr - buf_addr;
28289 if (len > iadev->rx_buf_sz) {
28290 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
28291- atomic_inc(&vcc->stats->rx_err);
28292+ atomic_inc_unchecked(&vcc->stats->rx_err);
28293 goto out_free_desc;
28294 }
28295
28296@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28297 ia_vcc = INPH_IA_VCC(vcc);
28298 if (ia_vcc == NULL)
28299 {
28300- atomic_inc(&vcc->stats->rx_err);
28301+ atomic_inc_unchecked(&vcc->stats->rx_err);
28302 atm_return(vcc, skb->truesize);
28303 dev_kfree_skb_any(skb);
28304 goto INCR_DLE;
28305@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28306 if ((length > iadev->rx_buf_sz) || (length >
28307 (skb->len - sizeof(struct cpcs_trailer))))
28308 {
28309- atomic_inc(&vcc->stats->rx_err);
28310+ atomic_inc_unchecked(&vcc->stats->rx_err);
28311 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
28312 length, skb->len);)
28313 atm_return(vcc, skb->truesize);
28314@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28315
28316 IF_RX(printk("rx_dle_intr: skb push");)
28317 vcc->push(vcc,skb);
28318- atomic_inc(&vcc->stats->rx);
28319+ atomic_inc_unchecked(&vcc->stats->rx);
28320 iadev->rx_pkt_cnt++;
28321 }
28322 INCR_DLE:
28323@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
28324 {
28325 struct k_sonet_stats *stats;
28326 stats = &PRIV(_ia_dev[board])->sonet_stats;
28327- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
28328- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
28329- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
28330- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
28331- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
28332- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
28333- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
28334- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
28335- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
28336+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
28337+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
28338+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
28339+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
28340+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
28341+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
28342+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
28343+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
28344+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
28345 }
28346 ia_cmds.status = 0;
28347 break;
28348@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28349 if ((desc == 0) || (desc > iadev->num_tx_desc))
28350 {
28351 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
28352- atomic_inc(&vcc->stats->tx);
28353+ atomic_inc_unchecked(&vcc->stats->tx);
28354 if (vcc->pop)
28355 vcc->pop(vcc, skb);
28356 else
28357@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28358 ATM_DESC(skb) = vcc->vci;
28359 skb_queue_tail(&iadev->tx_dma_q, skb);
28360
28361- atomic_inc(&vcc->stats->tx);
28362+ atomic_inc_unchecked(&vcc->stats->tx);
28363 iadev->tx_pkt_cnt++;
28364 /* Increment transaction counter */
28365 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
28366
28367 #if 0
28368 /* add flow control logic */
28369- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
28370+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
28371 if (iavcc->vc_desc_cnt > 10) {
28372 vcc->tx_quota = vcc->tx_quota * 3 / 4;
28373 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
28374diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
28375index 68c7588..7036683 100644
28376--- a/drivers/atm/lanai.c
28377+++ b/drivers/atm/lanai.c
28378@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
28379 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
28380 lanai_endtx(lanai, lvcc);
28381 lanai_free_skb(lvcc->tx.atmvcc, skb);
28382- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
28383+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
28384 }
28385
28386 /* Try to fill the buffer - don't call unless there is backlog */
28387@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
28388 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
28389 __net_timestamp(skb);
28390 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
28391- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
28392+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
28393 out:
28394 lvcc->rx.buf.ptr = end;
28395 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
28396@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28397 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
28398 "vcc %d\n", lanai->number, (unsigned int) s, vci);
28399 lanai->stats.service_rxnotaal5++;
28400- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28401+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28402 return 0;
28403 }
28404 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
28405@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28406 int bytes;
28407 read_unlock(&vcc_sklist_lock);
28408 DPRINTK("got trashed rx pdu on vci %d\n", vci);
28409- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28410+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28411 lvcc->stats.x.aal5.service_trash++;
28412 bytes = (SERVICE_GET_END(s) * 16) -
28413 (((unsigned long) lvcc->rx.buf.ptr) -
28414@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28415 }
28416 if (s & SERVICE_STREAM) {
28417 read_unlock(&vcc_sklist_lock);
28418- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28419+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28420 lvcc->stats.x.aal5.service_stream++;
28421 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
28422 "PDU on VCI %d!\n", lanai->number, vci);
28423@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28424 return 0;
28425 }
28426 DPRINTK("got rx crc error on vci %d\n", vci);
28427- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28428+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28429 lvcc->stats.x.aal5.service_rxcrc++;
28430 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
28431 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
28432diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
28433index 1c70c45..300718d 100644
28434--- a/drivers/atm/nicstar.c
28435+++ b/drivers/atm/nicstar.c
28436@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28437 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
28438 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
28439 card->index);
28440- atomic_inc(&vcc->stats->tx_err);
28441+ atomic_inc_unchecked(&vcc->stats->tx_err);
28442 dev_kfree_skb_any(skb);
28443 return -EINVAL;
28444 }
28445@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28446 if (!vc->tx) {
28447 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
28448 card->index);
28449- atomic_inc(&vcc->stats->tx_err);
28450+ atomic_inc_unchecked(&vcc->stats->tx_err);
28451 dev_kfree_skb_any(skb);
28452 return -EINVAL;
28453 }
28454@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28455 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
28456 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
28457 card->index);
28458- atomic_inc(&vcc->stats->tx_err);
28459+ atomic_inc_unchecked(&vcc->stats->tx_err);
28460 dev_kfree_skb_any(skb);
28461 return -EINVAL;
28462 }
28463
28464 if (skb_shinfo(skb)->nr_frags != 0) {
28465 printk("nicstar%d: No scatter-gather yet.\n", card->index);
28466- atomic_inc(&vcc->stats->tx_err);
28467+ atomic_inc_unchecked(&vcc->stats->tx_err);
28468 dev_kfree_skb_any(skb);
28469 return -EINVAL;
28470 }
28471@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28472 }
28473
28474 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
28475- atomic_inc(&vcc->stats->tx_err);
28476+ atomic_inc_unchecked(&vcc->stats->tx_err);
28477 dev_kfree_skb_any(skb);
28478 return -EIO;
28479 }
28480- atomic_inc(&vcc->stats->tx);
28481+ atomic_inc_unchecked(&vcc->stats->tx);
28482
28483 return 0;
28484 }
28485@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28486 printk
28487 ("nicstar%d: Can't allocate buffers for aal0.\n",
28488 card->index);
28489- atomic_add(i, &vcc->stats->rx_drop);
28490+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
28491 break;
28492 }
28493 if (!atm_charge(vcc, sb->truesize)) {
28494 RXPRINTK
28495 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
28496 card->index);
28497- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28498+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28499 dev_kfree_skb_any(sb);
28500 break;
28501 }
28502@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28503 ATM_SKB(sb)->vcc = vcc;
28504 __net_timestamp(sb);
28505 vcc->push(vcc, sb);
28506- atomic_inc(&vcc->stats->rx);
28507+ atomic_inc_unchecked(&vcc->stats->rx);
28508 cell += ATM_CELL_PAYLOAD;
28509 }
28510
28511@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28512 if (iovb == NULL) {
28513 printk("nicstar%d: Out of iovec buffers.\n",
28514 card->index);
28515- atomic_inc(&vcc->stats->rx_drop);
28516+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28517 recycle_rx_buf(card, skb);
28518 return;
28519 }
28520@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28521 small or large buffer itself. */
28522 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
28523 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
28524- atomic_inc(&vcc->stats->rx_err);
28525+ atomic_inc_unchecked(&vcc->stats->rx_err);
28526 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28527 NS_MAX_IOVECS);
28528 NS_PRV_IOVCNT(iovb) = 0;
28529@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28530 ("nicstar%d: Expected a small buffer, and this is not one.\n",
28531 card->index);
28532 which_list(card, skb);
28533- atomic_inc(&vcc->stats->rx_err);
28534+ atomic_inc_unchecked(&vcc->stats->rx_err);
28535 recycle_rx_buf(card, skb);
28536 vc->rx_iov = NULL;
28537 recycle_iov_buf(card, iovb);
28538@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28539 ("nicstar%d: Expected a large buffer, and this is not one.\n",
28540 card->index);
28541 which_list(card, skb);
28542- atomic_inc(&vcc->stats->rx_err);
28543+ atomic_inc_unchecked(&vcc->stats->rx_err);
28544 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28545 NS_PRV_IOVCNT(iovb));
28546 vc->rx_iov = NULL;
28547@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28548 printk(" - PDU size mismatch.\n");
28549 else
28550 printk(".\n");
28551- atomic_inc(&vcc->stats->rx_err);
28552+ atomic_inc_unchecked(&vcc->stats->rx_err);
28553 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28554 NS_PRV_IOVCNT(iovb));
28555 vc->rx_iov = NULL;
28556@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28557 /* skb points to a small buffer */
28558 if (!atm_charge(vcc, skb->truesize)) {
28559 push_rxbufs(card, skb);
28560- atomic_inc(&vcc->stats->rx_drop);
28561+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28562 } else {
28563 skb_put(skb, len);
28564 dequeue_sm_buf(card, skb);
28565@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28566 ATM_SKB(skb)->vcc = vcc;
28567 __net_timestamp(skb);
28568 vcc->push(vcc, skb);
28569- atomic_inc(&vcc->stats->rx);
28570+ atomic_inc_unchecked(&vcc->stats->rx);
28571 }
28572 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
28573 struct sk_buff *sb;
28574@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28575 if (len <= NS_SMBUFSIZE) {
28576 if (!atm_charge(vcc, sb->truesize)) {
28577 push_rxbufs(card, sb);
28578- atomic_inc(&vcc->stats->rx_drop);
28579+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28580 } else {
28581 skb_put(sb, len);
28582 dequeue_sm_buf(card, sb);
28583@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28584 ATM_SKB(sb)->vcc = vcc;
28585 __net_timestamp(sb);
28586 vcc->push(vcc, sb);
28587- atomic_inc(&vcc->stats->rx);
28588+ atomic_inc_unchecked(&vcc->stats->rx);
28589 }
28590
28591 push_rxbufs(card, skb);
28592@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28593
28594 if (!atm_charge(vcc, skb->truesize)) {
28595 push_rxbufs(card, skb);
28596- atomic_inc(&vcc->stats->rx_drop);
28597+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28598 } else {
28599 dequeue_lg_buf(card, skb);
28600 #ifdef NS_USE_DESTRUCTORS
28601@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28602 ATM_SKB(skb)->vcc = vcc;
28603 __net_timestamp(skb);
28604 vcc->push(vcc, skb);
28605- atomic_inc(&vcc->stats->rx);
28606+ atomic_inc_unchecked(&vcc->stats->rx);
28607 }
28608
28609 push_rxbufs(card, sb);
28610@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28611 printk
28612 ("nicstar%d: Out of huge buffers.\n",
28613 card->index);
28614- atomic_inc(&vcc->stats->rx_drop);
28615+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28616 recycle_iovec_rx_bufs(card,
28617 (struct iovec *)
28618 iovb->data,
28619@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28620 card->hbpool.count++;
28621 } else
28622 dev_kfree_skb_any(hb);
28623- atomic_inc(&vcc->stats->rx_drop);
28624+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28625 } else {
28626 /* Copy the small buffer to the huge buffer */
28627 sb = (struct sk_buff *)iov->iov_base;
28628@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28629 #endif /* NS_USE_DESTRUCTORS */
28630 __net_timestamp(hb);
28631 vcc->push(vcc, hb);
28632- atomic_inc(&vcc->stats->rx);
28633+ atomic_inc_unchecked(&vcc->stats->rx);
28634 }
28635 }
28636
28637diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
28638index 9851093..adb2b1e 100644
28639--- a/drivers/atm/solos-pci.c
28640+++ b/drivers/atm/solos-pci.c
28641@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
28642 }
28643 atm_charge(vcc, skb->truesize);
28644 vcc->push(vcc, skb);
28645- atomic_inc(&vcc->stats->rx);
28646+ atomic_inc_unchecked(&vcc->stats->rx);
28647 break;
28648
28649 case PKT_STATUS:
28650@@ -1009,7 +1009,7 @@ static uint32_t fpga_tx(struct solos_card *card)
28651 vcc = SKB_CB(oldskb)->vcc;
28652
28653 if (vcc) {
28654- atomic_inc(&vcc->stats->tx);
28655+ atomic_inc_unchecked(&vcc->stats->tx);
28656 solos_pop(vcc, oldskb);
28657 } else
28658 dev_kfree_skb_irq(oldskb);
28659diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
28660index 0215934..ce9f5b1 100644
28661--- a/drivers/atm/suni.c
28662+++ b/drivers/atm/suni.c
28663@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
28664
28665
28666 #define ADD_LIMITED(s,v) \
28667- atomic_add((v),&stats->s); \
28668- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
28669+ atomic_add_unchecked((v),&stats->s); \
28670+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
28671
28672
28673 static void suni_hz(unsigned long from_timer)
28674diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
28675index 5120a96..e2572bd 100644
28676--- a/drivers/atm/uPD98402.c
28677+++ b/drivers/atm/uPD98402.c
28678@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
28679 struct sonet_stats tmp;
28680 int error = 0;
28681
28682- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28683+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28684 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
28685 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
28686 if (zero && !error) {
28687@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
28688
28689
28690 #define ADD_LIMITED(s,v) \
28691- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
28692- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
28693- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28694+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
28695+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
28696+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28697
28698
28699 static void stat_event(struct atm_dev *dev)
28700@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
28701 if (reason & uPD98402_INT_PFM) stat_event(dev);
28702 if (reason & uPD98402_INT_PCO) {
28703 (void) GET(PCOCR); /* clear interrupt cause */
28704- atomic_add(GET(HECCT),
28705+ atomic_add_unchecked(GET(HECCT),
28706 &PRIV(dev)->sonet_stats.uncorr_hcs);
28707 }
28708 if ((reason & uPD98402_INT_RFO) &&
28709@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
28710 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
28711 uPD98402_INT_LOS),PIMR); /* enable them */
28712 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
28713- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28714- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
28715- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
28716+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28717+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
28718+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
28719 return 0;
28720 }
28721
28722diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
28723index abe4e20..83c4727 100644
28724--- a/drivers/atm/zatm.c
28725+++ b/drivers/atm/zatm.c
28726@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28727 }
28728 if (!size) {
28729 dev_kfree_skb_irq(skb);
28730- if (vcc) atomic_inc(&vcc->stats->rx_err);
28731+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
28732 continue;
28733 }
28734 if (!atm_charge(vcc,skb->truesize)) {
28735@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28736 skb->len = size;
28737 ATM_SKB(skb)->vcc = vcc;
28738 vcc->push(vcc,skb);
28739- atomic_inc(&vcc->stats->rx);
28740+ atomic_inc_unchecked(&vcc->stats->rx);
28741 }
28742 zout(pos & 0xffff,MTA(mbx));
28743 #if 0 /* probably a stupid idea */
28744@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
28745 skb_queue_head(&zatm_vcc->backlog,skb);
28746 break;
28747 }
28748- atomic_inc(&vcc->stats->tx);
28749+ atomic_inc_unchecked(&vcc->stats->tx);
28750 wake_up(&zatm_vcc->tx_wait);
28751 }
28752
28753diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
28754index 8493536..31adee0 100644
28755--- a/drivers/base/devtmpfs.c
28756+++ b/drivers/base/devtmpfs.c
28757@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
28758 if (!thread)
28759 return 0;
28760
28761- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
28762+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
28763 if (err)
28764 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
28765 else
28766diff --git a/drivers/base/node.c b/drivers/base/node.c
28767index 90aa2a1..af1a177 100644
28768--- a/drivers/base/node.c
28769+++ b/drivers/base/node.c
28770@@ -592,11 +592,9 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
28771 {
28772 int n;
28773
28774- n = nodelist_scnprintf(buf, PAGE_SIZE, node_states[state]);
28775- if (n > 0 && PAGE_SIZE > n + 1) {
28776- *(buf + n++) = '\n';
28777- *(buf + n++) = '\0';
28778- }
28779+ n = nodelist_scnprintf(buf, PAGE_SIZE-2, node_states[state]);
28780+ buf[n++] = '\n';
28781+ buf[n] = '\0';
28782 return n;
28783 }
28784
28785diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
28786index 2a3e581..3d6a73f 100644
28787--- a/drivers/base/power/wakeup.c
28788+++ b/drivers/base/power/wakeup.c
28789@@ -30,14 +30,14 @@ bool events_check_enabled;
28790 * They need to be modified together atomically, so it's better to use one
28791 * atomic variable to hold them both.
28792 */
28793-static atomic_t combined_event_count = ATOMIC_INIT(0);
28794+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
28795
28796 #define IN_PROGRESS_BITS (sizeof(int) * 4)
28797 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
28798
28799 static void split_counters(unsigned int *cnt, unsigned int *inpr)
28800 {
28801- unsigned int comb = atomic_read(&combined_event_count);
28802+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
28803
28804 *cnt = (comb >> IN_PROGRESS_BITS);
28805 *inpr = comb & MAX_IN_PROGRESS;
28806@@ -379,7 +379,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
28807 ws->last_time = ktime_get();
28808
28809 /* Increment the counter of events in progress. */
28810- atomic_inc(&combined_event_count);
28811+ atomic_inc_unchecked(&combined_event_count);
28812 }
28813
28814 /**
28815@@ -475,7 +475,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
28816 * Increment the counter of registered wakeup events and decrement the
28817 * couter of wakeup events in progress simultaneously.
28818 */
28819- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
28820+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
28821 }
28822
28823 /**
28824diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
28825index b0f553b..77b928b 100644
28826--- a/drivers/block/cciss.c
28827+++ b/drivers/block/cciss.c
28828@@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
28829 int err;
28830 u32 cp;
28831
28832+ memset(&arg64, 0, sizeof(arg64));
28833+
28834 err = 0;
28835 err |=
28836 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
28837@@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
28838 while (!list_empty(&h->reqQ)) {
28839 c = list_entry(h->reqQ.next, CommandList_struct, list);
28840 /* can't do anything if fifo is full */
28841- if ((h->access.fifo_full(h))) {
28842+ if ((h->access->fifo_full(h))) {
28843 dev_warn(&h->pdev->dev, "fifo full\n");
28844 break;
28845 }
28846@@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
28847 h->Qdepth--;
28848
28849 /* Tell the controller execute command */
28850- h->access.submit_command(h, c);
28851+ h->access->submit_command(h, c);
28852
28853 /* Put job onto the completed Q */
28854 addQ(&h->cmpQ, c);
28855@@ -3443,17 +3445,17 @@ startio:
28856
28857 static inline unsigned long get_next_completion(ctlr_info_t *h)
28858 {
28859- return h->access.command_completed(h);
28860+ return h->access->command_completed(h);
28861 }
28862
28863 static inline int interrupt_pending(ctlr_info_t *h)
28864 {
28865- return h->access.intr_pending(h);
28866+ return h->access->intr_pending(h);
28867 }
28868
28869 static inline long interrupt_not_for_us(ctlr_info_t *h)
28870 {
28871- return ((h->access.intr_pending(h) == 0) ||
28872+ return ((h->access->intr_pending(h) == 0) ||
28873 (h->interrupts_enabled == 0));
28874 }
28875
28876@@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
28877 u32 a;
28878
28879 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
28880- return h->access.command_completed(h);
28881+ return h->access->command_completed(h);
28882
28883 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
28884 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
28885@@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
28886 trans_support & CFGTBL_Trans_use_short_tags);
28887
28888 /* Change the access methods to the performant access methods */
28889- h->access = SA5_performant_access;
28890+ h->access = &SA5_performant_access;
28891 h->transMethod = CFGTBL_Trans_Performant;
28892
28893 return;
28894@@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
28895 if (prod_index < 0)
28896 return -ENODEV;
28897 h->product_name = products[prod_index].product_name;
28898- h->access = *(products[prod_index].access);
28899+ h->access = products[prod_index].access;
28900
28901 if (cciss_board_disabled(h)) {
28902 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
28903@@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
28904 }
28905
28906 /* make sure the board interrupts are off */
28907- h->access.set_intr_mask(h, CCISS_INTR_OFF);
28908+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
28909 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
28910 if (rc)
28911 goto clean2;
28912@@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
28913 * fake ones to scoop up any residual completions.
28914 */
28915 spin_lock_irqsave(&h->lock, flags);
28916- h->access.set_intr_mask(h, CCISS_INTR_OFF);
28917+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
28918 spin_unlock_irqrestore(&h->lock, flags);
28919 free_irq(h->intr[h->intr_mode], h);
28920 rc = cciss_request_irq(h, cciss_msix_discard_completions,
28921@@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
28922 dev_info(&h->pdev->dev, "Board READY.\n");
28923 dev_info(&h->pdev->dev,
28924 "Waiting for stale completions to drain.\n");
28925- h->access.set_intr_mask(h, CCISS_INTR_ON);
28926+ h->access->set_intr_mask(h, CCISS_INTR_ON);
28927 msleep(10000);
28928- h->access.set_intr_mask(h, CCISS_INTR_OFF);
28929+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
28930
28931 rc = controller_reset_failed(h->cfgtable);
28932 if (rc)
28933@@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
28934 cciss_scsi_setup(h);
28935
28936 /* Turn the interrupts on so we can service requests */
28937- h->access.set_intr_mask(h, CCISS_INTR_ON);
28938+ h->access->set_intr_mask(h, CCISS_INTR_ON);
28939
28940 /* Get the firmware version */
28941 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
28942@@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
28943 kfree(flush_buf);
28944 if (return_code != IO_OK)
28945 dev_warn(&h->pdev->dev, "Error flushing cache\n");
28946- h->access.set_intr_mask(h, CCISS_INTR_OFF);
28947+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
28948 free_irq(h->intr[h->intr_mode], h);
28949 }
28950
28951diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
28952index 7fda30e..eb5dfe0 100644
28953--- a/drivers/block/cciss.h
28954+++ b/drivers/block/cciss.h
28955@@ -101,7 +101,7 @@ struct ctlr_info
28956 /* information about each logical volume */
28957 drive_info_struct *drv[CISS_MAX_LUN];
28958
28959- struct access_method access;
28960+ struct access_method *access;
28961
28962 /* queue and queue Info */
28963 struct list_head reqQ;
28964diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
28965index 9125bbe..eede5c8 100644
28966--- a/drivers/block/cpqarray.c
28967+++ b/drivers/block/cpqarray.c
28968@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
28969 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
28970 goto Enomem4;
28971 }
28972- hba[i]->access.set_intr_mask(hba[i], 0);
28973+ hba[i]->access->set_intr_mask(hba[i], 0);
28974 if (request_irq(hba[i]->intr, do_ida_intr,
28975 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
28976 {
28977@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
28978 add_timer(&hba[i]->timer);
28979
28980 /* Enable IRQ now that spinlock and rate limit timer are set up */
28981- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28982+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28983
28984 for(j=0; j<NWD; j++) {
28985 struct gendisk *disk = ida_gendisk[i][j];
28986@@ -694,7 +694,7 @@ DBGINFO(
28987 for(i=0; i<NR_PRODUCTS; i++) {
28988 if (board_id == products[i].board_id) {
28989 c->product_name = products[i].product_name;
28990- c->access = *(products[i].access);
28991+ c->access = products[i].access;
28992 break;
28993 }
28994 }
28995@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
28996 hba[ctlr]->intr = intr;
28997 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
28998 hba[ctlr]->product_name = products[j].product_name;
28999- hba[ctlr]->access = *(products[j].access);
29000+ hba[ctlr]->access = products[j].access;
29001 hba[ctlr]->ctlr = ctlr;
29002 hba[ctlr]->board_id = board_id;
29003 hba[ctlr]->pci_dev = NULL; /* not PCI */
29004@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
29005
29006 while((c = h->reqQ) != NULL) {
29007 /* Can't do anything if we're busy */
29008- if (h->access.fifo_full(h) == 0)
29009+ if (h->access->fifo_full(h) == 0)
29010 return;
29011
29012 /* Get the first entry from the request Q */
29013@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
29014 h->Qdepth--;
29015
29016 /* Tell the controller to do our bidding */
29017- h->access.submit_command(h, c);
29018+ h->access->submit_command(h, c);
29019
29020 /* Get onto the completion Q */
29021 addQ(&h->cmpQ, c);
29022@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
29023 unsigned long flags;
29024 __u32 a,a1;
29025
29026- istat = h->access.intr_pending(h);
29027+ istat = h->access->intr_pending(h);
29028 /* Is this interrupt for us? */
29029 if (istat == 0)
29030 return IRQ_NONE;
29031@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
29032 */
29033 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
29034 if (istat & FIFO_NOT_EMPTY) {
29035- while((a = h->access.command_completed(h))) {
29036+ while((a = h->access->command_completed(h))) {
29037 a1 = a; a &= ~3;
29038 if ((c = h->cmpQ) == NULL)
29039 {
29040@@ -1449,11 +1449,11 @@ static int sendcmd(
29041 /*
29042 * Disable interrupt
29043 */
29044- info_p->access.set_intr_mask(info_p, 0);
29045+ info_p->access->set_intr_mask(info_p, 0);
29046 /* Make sure there is room in the command FIFO */
29047 /* Actually it should be completely empty at this time. */
29048 for (i = 200000; i > 0; i--) {
29049- temp = info_p->access.fifo_full(info_p);
29050+ temp = info_p->access->fifo_full(info_p);
29051 if (temp != 0) {
29052 break;
29053 }
29054@@ -1466,7 +1466,7 @@ DBG(
29055 /*
29056 * Send the cmd
29057 */
29058- info_p->access.submit_command(info_p, c);
29059+ info_p->access->submit_command(info_p, c);
29060 complete = pollcomplete(ctlr);
29061
29062 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
29063@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
29064 * we check the new geometry. Then turn interrupts back on when
29065 * we're done.
29066 */
29067- host->access.set_intr_mask(host, 0);
29068+ host->access->set_intr_mask(host, 0);
29069 getgeometry(ctlr);
29070- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
29071+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
29072
29073 for(i=0; i<NWD; i++) {
29074 struct gendisk *disk = ida_gendisk[ctlr][i];
29075@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
29076 /* Wait (up to 2 seconds) for a command to complete */
29077
29078 for (i = 200000; i > 0; i--) {
29079- done = hba[ctlr]->access.command_completed(hba[ctlr]);
29080+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
29081 if (done == 0) {
29082 udelay(10); /* a short fixed delay */
29083 } else
29084diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
29085index be73e9d..7fbf140 100644
29086--- a/drivers/block/cpqarray.h
29087+++ b/drivers/block/cpqarray.h
29088@@ -99,7 +99,7 @@ struct ctlr_info {
29089 drv_info_t drv[NWD];
29090 struct proc_dir_entry *proc;
29091
29092- struct access_method access;
29093+ struct access_method *access;
29094
29095 cmdlist_t *reqQ;
29096 cmdlist_t *cmpQ;
29097diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
29098index 8d68056..e67050f 100644
29099--- a/drivers/block/drbd/drbd_int.h
29100+++ b/drivers/block/drbd/drbd_int.h
29101@@ -736,7 +736,7 @@ struct drbd_request;
29102 struct drbd_epoch {
29103 struct list_head list;
29104 unsigned int barrier_nr;
29105- atomic_t epoch_size; /* increased on every request added. */
29106+ atomic_unchecked_t epoch_size; /* increased on every request added. */
29107 atomic_t active; /* increased on every req. added, and dec on every finished. */
29108 unsigned long flags;
29109 };
29110@@ -1108,7 +1108,7 @@ struct drbd_conf {
29111 void *int_dig_in;
29112 void *int_dig_vv;
29113 wait_queue_head_t seq_wait;
29114- atomic_t packet_seq;
29115+ atomic_unchecked_t packet_seq;
29116 unsigned int peer_seq;
29117 spinlock_t peer_seq_lock;
29118 unsigned int minor;
29119@@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
29120
29121 static inline void drbd_tcp_cork(struct socket *sock)
29122 {
29123- int __user val = 1;
29124+ int val = 1;
29125 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29126- (char __user *)&val, sizeof(val));
29127+ (char __force_user *)&val, sizeof(val));
29128 }
29129
29130 static inline void drbd_tcp_uncork(struct socket *sock)
29131 {
29132- int __user val = 0;
29133+ int val = 0;
29134 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29135- (char __user *)&val, sizeof(val));
29136+ (char __force_user *)&val, sizeof(val));
29137 }
29138
29139 static inline void drbd_tcp_nodelay(struct socket *sock)
29140 {
29141- int __user val = 1;
29142+ int val = 1;
29143 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
29144- (char __user *)&val, sizeof(val));
29145+ (char __force_user *)&val, sizeof(val));
29146 }
29147
29148 static inline void drbd_tcp_quickack(struct socket *sock)
29149 {
29150- int __user val = 2;
29151+ int val = 2;
29152 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
29153- (char __user *)&val, sizeof(val));
29154+ (char __force_user *)&val, sizeof(val));
29155 }
29156
29157 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
29158diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
29159index 211fc44..c5116f1 100644
29160--- a/drivers/block/drbd/drbd_main.c
29161+++ b/drivers/block/drbd/drbd_main.c
29162@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
29163 p.sector = sector;
29164 p.block_id = block_id;
29165 p.blksize = blksize;
29166- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
29167+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
29168
29169 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
29170 return false;
29171@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
29172 p.sector = cpu_to_be64(req->sector);
29173 p.block_id = (unsigned long)req;
29174 p.seq_num = cpu_to_be32(req->seq_num =
29175- atomic_add_return(1, &mdev->packet_seq));
29176+ atomic_add_return_unchecked(1, &mdev->packet_seq));
29177
29178 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
29179
29180@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
29181 atomic_set(&mdev->unacked_cnt, 0);
29182 atomic_set(&mdev->local_cnt, 0);
29183 atomic_set(&mdev->net_cnt, 0);
29184- atomic_set(&mdev->packet_seq, 0);
29185+ atomic_set_unchecked(&mdev->packet_seq, 0);
29186 atomic_set(&mdev->pp_in_use, 0);
29187 atomic_set(&mdev->pp_in_use_by_net, 0);
29188 atomic_set(&mdev->rs_sect_in, 0);
29189@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
29190 mdev->receiver.t_state);
29191
29192 /* no need to lock it, I'm the only thread alive */
29193- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
29194- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
29195+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
29196+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
29197 mdev->al_writ_cnt =
29198 mdev->bm_writ_cnt =
29199 mdev->read_cnt =
29200diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
29201index 946166e..356b39a 100644
29202--- a/drivers/block/drbd/drbd_nl.c
29203+++ b/drivers/block/drbd/drbd_nl.c
29204@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
29205 module_put(THIS_MODULE);
29206 }
29207
29208-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29209+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29210
29211 static unsigned short *
29212 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
29213@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
29214 cn_reply->id.idx = CN_IDX_DRBD;
29215 cn_reply->id.val = CN_VAL_DRBD;
29216
29217- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29218+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29219 cn_reply->ack = 0; /* not used here. */
29220 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29221 (int)((char *)tl - (char *)reply->tag_list);
29222@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
29223 cn_reply->id.idx = CN_IDX_DRBD;
29224 cn_reply->id.val = CN_VAL_DRBD;
29225
29226- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29227+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29228 cn_reply->ack = 0; /* not used here. */
29229 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29230 (int)((char *)tl - (char *)reply->tag_list);
29231@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
29232 cn_reply->id.idx = CN_IDX_DRBD;
29233 cn_reply->id.val = CN_VAL_DRBD;
29234
29235- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
29236+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
29237 cn_reply->ack = 0; // not used here.
29238 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29239 (int)((char*)tl - (char*)reply->tag_list);
29240@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
29241 cn_reply->id.idx = CN_IDX_DRBD;
29242 cn_reply->id.val = CN_VAL_DRBD;
29243
29244- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29245+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29246 cn_reply->ack = 0; /* not used here. */
29247 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29248 (int)((char *)tl - (char *)reply->tag_list);
29249diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
29250index 43beaca..4a5b1dd 100644
29251--- a/drivers/block/drbd/drbd_receiver.c
29252+++ b/drivers/block/drbd/drbd_receiver.c
29253@@ -894,7 +894,7 @@ retry:
29254 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
29255 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
29256
29257- atomic_set(&mdev->packet_seq, 0);
29258+ atomic_set_unchecked(&mdev->packet_seq, 0);
29259 mdev->peer_seq = 0;
29260
29261 drbd_thread_start(&mdev->asender);
29262@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29263 do {
29264 next_epoch = NULL;
29265
29266- epoch_size = atomic_read(&epoch->epoch_size);
29267+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
29268
29269 switch (ev & ~EV_CLEANUP) {
29270 case EV_PUT:
29271@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29272 rv = FE_DESTROYED;
29273 } else {
29274 epoch->flags = 0;
29275- atomic_set(&epoch->epoch_size, 0);
29276+ atomic_set_unchecked(&epoch->epoch_size, 0);
29277 /* atomic_set(&epoch->active, 0); is already zero */
29278 if (rv == FE_STILL_LIVE)
29279 rv = FE_RECYCLED;
29280@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29281 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
29282 drbd_flush(mdev);
29283
29284- if (atomic_read(&mdev->current_epoch->epoch_size)) {
29285+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29286 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
29287 if (epoch)
29288 break;
29289 }
29290
29291 epoch = mdev->current_epoch;
29292- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
29293+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
29294
29295 D_ASSERT(atomic_read(&epoch->active) == 0);
29296 D_ASSERT(epoch->flags == 0);
29297@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29298 }
29299
29300 epoch->flags = 0;
29301- atomic_set(&epoch->epoch_size, 0);
29302+ atomic_set_unchecked(&epoch->epoch_size, 0);
29303 atomic_set(&epoch->active, 0);
29304
29305 spin_lock(&mdev->epoch_lock);
29306- if (atomic_read(&mdev->current_epoch->epoch_size)) {
29307+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29308 list_add(&epoch->list, &mdev->current_epoch->list);
29309 mdev->current_epoch = epoch;
29310 mdev->epochs++;
29311@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29312 spin_unlock(&mdev->peer_seq_lock);
29313
29314 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
29315- atomic_inc(&mdev->current_epoch->epoch_size);
29316+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
29317 return drbd_drain_block(mdev, data_size);
29318 }
29319
29320@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29321
29322 spin_lock(&mdev->epoch_lock);
29323 e->epoch = mdev->current_epoch;
29324- atomic_inc(&e->epoch->epoch_size);
29325+ atomic_inc_unchecked(&e->epoch->epoch_size);
29326 atomic_inc(&e->epoch->active);
29327 spin_unlock(&mdev->epoch_lock);
29328
29329@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
29330 D_ASSERT(list_empty(&mdev->done_ee));
29331
29332 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
29333- atomic_set(&mdev->current_epoch->epoch_size, 0);
29334+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
29335 D_ASSERT(list_empty(&mdev->current_epoch->list));
29336 }
29337
29338diff --git a/drivers/block/loop.c b/drivers/block/loop.c
29339index bbca966..65e37dd 100644
29340--- a/drivers/block/loop.c
29341+++ b/drivers/block/loop.c
29342@@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
29343 mm_segment_t old_fs = get_fs();
29344
29345 set_fs(get_ds());
29346- bw = file->f_op->write(file, buf, len, &pos);
29347+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
29348 set_fs(old_fs);
29349 if (likely(bw == len))
29350 return 0;
29351diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
29352index ee94686..3e09ad3 100644
29353--- a/drivers/char/Kconfig
29354+++ b/drivers/char/Kconfig
29355@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
29356
29357 config DEVKMEM
29358 bool "/dev/kmem virtual device support"
29359- default y
29360+ default n
29361+ depends on !GRKERNSEC_KMEM
29362 help
29363 Say Y here if you want to support the /dev/kmem device. The
29364 /dev/kmem device is rarely used, but can be used for certain
29365@@ -581,6 +582,7 @@ config DEVPORT
29366 bool
29367 depends on !M68K
29368 depends on ISA || PCI
29369+ depends on !GRKERNSEC_KMEM
29370 default y
29371
29372 source "drivers/s390/char/Kconfig"
29373diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
29374index 2e04433..22afc64 100644
29375--- a/drivers/char/agp/frontend.c
29376+++ b/drivers/char/agp/frontend.c
29377@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
29378 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
29379 return -EFAULT;
29380
29381- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
29382+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
29383 return -EFAULT;
29384
29385 client = agp_find_client_by_pid(reserve.pid);
29386diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
29387index 21cb980..f15107c 100644
29388--- a/drivers/char/genrtc.c
29389+++ b/drivers/char/genrtc.c
29390@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
29391 switch (cmd) {
29392
29393 case RTC_PLL_GET:
29394+ memset(&pll, 0, sizeof(pll));
29395 if (get_rtc_pll(&pll))
29396 return -EINVAL;
29397 else
29398diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
29399index dfd7876..c0b0885 100644
29400--- a/drivers/char/hpet.c
29401+++ b/drivers/char/hpet.c
29402@@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
29403 }
29404
29405 static int
29406-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
29407+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
29408 struct hpet_info *info)
29409 {
29410 struct hpet_timer __iomem *timer;
29411diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
29412index 2c29942..604c5ba 100644
29413--- a/drivers/char/ipmi/ipmi_msghandler.c
29414+++ b/drivers/char/ipmi/ipmi_msghandler.c
29415@@ -420,7 +420,7 @@ struct ipmi_smi {
29416 struct proc_dir_entry *proc_dir;
29417 char proc_dir_name[10];
29418
29419- atomic_t stats[IPMI_NUM_STATS];
29420+ atomic_unchecked_t stats[IPMI_NUM_STATS];
29421
29422 /*
29423 * run_to_completion duplicate of smb_info, smi_info
29424@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
29425
29426
29427 #define ipmi_inc_stat(intf, stat) \
29428- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
29429+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
29430 #define ipmi_get_stat(intf, stat) \
29431- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
29432+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
29433
29434 static int is_lan_addr(struct ipmi_addr *addr)
29435 {
29436@@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
29437 INIT_LIST_HEAD(&intf->cmd_rcvrs);
29438 init_waitqueue_head(&intf->waitq);
29439 for (i = 0; i < IPMI_NUM_STATS; i++)
29440- atomic_set(&intf->stats[i], 0);
29441+ atomic_set_unchecked(&intf->stats[i], 0);
29442
29443 intf->proc_dir = NULL;
29444
29445diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
29446index 1e638ff..a869ef5 100644
29447--- a/drivers/char/ipmi/ipmi_si_intf.c
29448+++ b/drivers/char/ipmi/ipmi_si_intf.c
29449@@ -275,7 +275,7 @@ struct smi_info {
29450 unsigned char slave_addr;
29451
29452 /* Counters and things for the proc filesystem. */
29453- atomic_t stats[SI_NUM_STATS];
29454+ atomic_unchecked_t stats[SI_NUM_STATS];
29455
29456 struct task_struct *thread;
29457
29458@@ -284,9 +284,9 @@ struct smi_info {
29459 };
29460
29461 #define smi_inc_stat(smi, stat) \
29462- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
29463+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
29464 #define smi_get_stat(smi, stat) \
29465- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
29466+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
29467
29468 #define SI_MAX_PARMS 4
29469
29470@@ -3209,7 +3209,7 @@ static int try_smi_init(struct smi_info *new_smi)
29471 atomic_set(&new_smi->req_events, 0);
29472 new_smi->run_to_completion = 0;
29473 for (i = 0; i < SI_NUM_STATS; i++)
29474- atomic_set(&new_smi->stats[i], 0);
29475+ atomic_set_unchecked(&new_smi->stats[i], 0);
29476
29477 new_smi->interrupt_disabled = 1;
29478 atomic_set(&new_smi->stop_operation, 0);
29479diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
29480index 47ff7e4..0c7d340 100644
29481--- a/drivers/char/mbcs.c
29482+++ b/drivers/char/mbcs.c
29483@@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
29484 return 0;
29485 }
29486
29487-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
29488+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
29489 {
29490 .part_num = MBCS_PART_NUM,
29491 .mfg_num = MBCS_MFG_NUM,
29492diff --git a/drivers/char/mem.c b/drivers/char/mem.c
29493index d6e9d08..4493e89 100644
29494--- a/drivers/char/mem.c
29495+++ b/drivers/char/mem.c
29496@@ -18,6 +18,7 @@
29497 #include <linux/raw.h>
29498 #include <linux/tty.h>
29499 #include <linux/capability.h>
29500+#include <linux/security.h>
29501 #include <linux/ptrace.h>
29502 #include <linux/device.h>
29503 #include <linux/highmem.h>
29504@@ -35,6 +36,10 @@
29505 # include <linux/efi.h>
29506 #endif
29507
29508+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29509+extern const struct file_operations grsec_fops;
29510+#endif
29511+
29512 static inline unsigned long size_inside_page(unsigned long start,
29513 unsigned long size)
29514 {
29515@@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29516
29517 while (cursor < to) {
29518 if (!devmem_is_allowed(pfn)) {
29519+#ifdef CONFIG_GRKERNSEC_KMEM
29520+ gr_handle_mem_readwrite(from, to);
29521+#else
29522 printk(KERN_INFO
29523 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
29524 current->comm, from, to);
29525+#endif
29526 return 0;
29527 }
29528 cursor += PAGE_SIZE;
29529@@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29530 }
29531 return 1;
29532 }
29533+#elif defined(CONFIG_GRKERNSEC_KMEM)
29534+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29535+{
29536+ return 0;
29537+}
29538 #else
29539 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29540 {
29541@@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29542
29543 while (count > 0) {
29544 unsigned long remaining;
29545+ char *temp;
29546
29547 sz = size_inside_page(p, count);
29548
29549@@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29550 if (!ptr)
29551 return -EFAULT;
29552
29553- remaining = copy_to_user(buf, ptr, sz);
29554+#ifdef CONFIG_PAX_USERCOPY
29555+ temp = kmalloc(sz, GFP_KERNEL);
29556+ if (!temp) {
29557+ unxlate_dev_mem_ptr(p, ptr);
29558+ return -ENOMEM;
29559+ }
29560+ memcpy(temp, ptr, sz);
29561+#else
29562+ temp = ptr;
29563+#endif
29564+
29565+ remaining = copy_to_user(buf, temp, sz);
29566+
29567+#ifdef CONFIG_PAX_USERCOPY
29568+ kfree(temp);
29569+#endif
29570+
29571 unxlate_dev_mem_ptr(p, ptr);
29572 if (remaining)
29573 return -EFAULT;
29574@@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29575 size_t count, loff_t *ppos)
29576 {
29577 unsigned long p = *ppos;
29578- ssize_t low_count, read, sz;
29579+ ssize_t low_count, read, sz, err = 0;
29580 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
29581- int err = 0;
29582
29583 read = 0;
29584 if (p < (unsigned long) high_memory) {
29585@@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29586 }
29587 #endif
29588 while (low_count > 0) {
29589+ char *temp;
29590+
29591 sz = size_inside_page(p, low_count);
29592
29593 /*
29594@@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29595 */
29596 kbuf = xlate_dev_kmem_ptr((char *)p);
29597
29598- if (copy_to_user(buf, kbuf, sz))
29599+#ifdef CONFIG_PAX_USERCOPY
29600+ temp = kmalloc(sz, GFP_KERNEL);
29601+ if (!temp)
29602+ return -ENOMEM;
29603+ memcpy(temp, kbuf, sz);
29604+#else
29605+ temp = kbuf;
29606+#endif
29607+
29608+ err = copy_to_user(buf, temp, sz);
29609+
29610+#ifdef CONFIG_PAX_USERCOPY
29611+ kfree(temp);
29612+#endif
29613+
29614+ if (err)
29615 return -EFAULT;
29616 buf += sz;
29617 p += sz;
29618@@ -867,6 +914,9 @@ static const struct memdev {
29619 #ifdef CONFIG_CRASH_DUMP
29620 [12] = { "oldmem", 0, &oldmem_fops, NULL },
29621 #endif
29622+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29623+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
29624+#endif
29625 };
29626
29627 static int memory_open(struct inode *inode, struct file *filp)
29628diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
29629index 9df78e2..01ba9ae 100644
29630--- a/drivers/char/nvram.c
29631+++ b/drivers/char/nvram.c
29632@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
29633
29634 spin_unlock_irq(&rtc_lock);
29635
29636- if (copy_to_user(buf, contents, tmp - contents))
29637+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
29638 return -EFAULT;
29639
29640 *ppos = i;
29641diff --git a/drivers/char/random.c b/drivers/char/random.c
29642index 4ec04a7..4a092ed 100644
29643--- a/drivers/char/random.c
29644+++ b/drivers/char/random.c
29645@@ -261,8 +261,13 @@
29646 /*
29647 * Configuration information
29648 */
29649+#ifdef CONFIG_GRKERNSEC_RANDNET
29650+#define INPUT_POOL_WORDS 512
29651+#define OUTPUT_POOL_WORDS 128
29652+#else
29653 #define INPUT_POOL_WORDS 128
29654 #define OUTPUT_POOL_WORDS 32
29655+#endif
29656 #define SEC_XFER_SIZE 512
29657 #define EXTRACT_SIZE 10
29658
29659@@ -300,10 +305,17 @@ static struct poolinfo {
29660 int poolwords;
29661 int tap1, tap2, tap3, tap4, tap5;
29662 } poolinfo_table[] = {
29663+#ifdef CONFIG_GRKERNSEC_RANDNET
29664+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
29665+ { 512, 411, 308, 208, 104, 1 },
29666+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
29667+ { 128, 103, 76, 51, 25, 1 },
29668+#else
29669 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
29670 { 128, 103, 76, 51, 25, 1 },
29671 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
29672 { 32, 26, 20, 14, 7, 1 },
29673+#endif
29674 #if 0
29675 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
29676 { 2048, 1638, 1231, 819, 411, 1 },
29677@@ -913,7 +925,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
29678
29679 extract_buf(r, tmp);
29680 i = min_t(int, nbytes, EXTRACT_SIZE);
29681- if (copy_to_user(buf, tmp, i)) {
29682+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
29683 ret = -EFAULT;
29684 break;
29685 }
29686@@ -1238,7 +1250,7 @@ EXPORT_SYMBOL(generate_random_uuid);
29687 #include <linux/sysctl.h>
29688
29689 static int min_read_thresh = 8, min_write_thresh;
29690-static int max_read_thresh = INPUT_POOL_WORDS * 32;
29691+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
29692 static int max_write_thresh = INPUT_POOL_WORDS * 32;
29693 static char sysctl_bootid[16];
29694
29695diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
29696index 45713f0..8286d21 100644
29697--- a/drivers/char/sonypi.c
29698+++ b/drivers/char/sonypi.c
29699@@ -54,6 +54,7 @@
29700
29701 #include <asm/uaccess.h>
29702 #include <asm/io.h>
29703+#include <asm/local.h>
29704
29705 #include <linux/sonypi.h>
29706
29707@@ -490,7 +491,7 @@ static struct sonypi_device {
29708 spinlock_t fifo_lock;
29709 wait_queue_head_t fifo_proc_list;
29710 struct fasync_struct *fifo_async;
29711- int open_count;
29712+ local_t open_count;
29713 int model;
29714 struct input_dev *input_jog_dev;
29715 struct input_dev *input_key_dev;
29716@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
29717 static int sonypi_misc_release(struct inode *inode, struct file *file)
29718 {
29719 mutex_lock(&sonypi_device.lock);
29720- sonypi_device.open_count--;
29721+ local_dec(&sonypi_device.open_count);
29722 mutex_unlock(&sonypi_device.lock);
29723 return 0;
29724 }
29725@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
29726 {
29727 mutex_lock(&sonypi_device.lock);
29728 /* Flush input queue on first open */
29729- if (!sonypi_device.open_count)
29730+ if (!local_read(&sonypi_device.open_count))
29731 kfifo_reset(&sonypi_device.fifo);
29732- sonypi_device.open_count++;
29733+ local_inc(&sonypi_device.open_count);
29734 mutex_unlock(&sonypi_device.lock);
29735
29736 return 0;
29737diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
29738index ad7c732..5aa8054 100644
29739--- a/drivers/char/tpm/tpm.c
29740+++ b/drivers/char/tpm/tpm.c
29741@@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
29742 chip->vendor.req_complete_val)
29743 goto out_recv;
29744
29745- if ((status == chip->vendor.req_canceled)) {
29746+ if (status == chip->vendor.req_canceled) {
29747 dev_err(chip->dev, "Operation Canceled\n");
29748 rc = -ECANCELED;
29749 goto out;
29750diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
29751index 0636520..169c1d0 100644
29752--- a/drivers/char/tpm/tpm_bios.c
29753+++ b/drivers/char/tpm/tpm_bios.c
29754@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
29755 event = addr;
29756
29757 if ((event->event_type == 0 && event->event_size == 0) ||
29758- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
29759+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
29760 return NULL;
29761
29762 return addr;
29763@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
29764 return NULL;
29765
29766 if ((event->event_type == 0 && event->event_size == 0) ||
29767- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
29768+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
29769 return NULL;
29770
29771 (*pos)++;
29772@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
29773 int i;
29774
29775 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
29776- seq_putc(m, data[i]);
29777+ if (!seq_putc(m, data[i]))
29778+ return -EFAULT;
29779
29780 return 0;
29781 }
29782@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
29783 log->bios_event_log_end = log->bios_event_log + len;
29784
29785 virt = acpi_os_map_memory(start, len);
29786+ if (!virt) {
29787+ kfree(log->bios_event_log);
29788+ log->bios_event_log = NULL;
29789+ return -EFAULT;
29790+ }
29791
29792- memcpy(log->bios_event_log, virt, len);
29793+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
29794
29795 acpi_os_unmap_memory(virt, len);
29796 return 0;
29797diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
29798index cdf2f54..e55c197 100644
29799--- a/drivers/char/virtio_console.c
29800+++ b/drivers/char/virtio_console.c
29801@@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
29802 if (to_user) {
29803 ssize_t ret;
29804
29805- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
29806+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
29807 if (ret)
29808 return -EFAULT;
29809 } else {
29810@@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
29811 if (!port_has_data(port) && !port->host_connected)
29812 return 0;
29813
29814- return fill_readbuf(port, ubuf, count, true);
29815+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
29816 }
29817
29818 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
29819diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
29820index 97f5064..202b6e6 100644
29821--- a/drivers/edac/edac_pci_sysfs.c
29822+++ b/drivers/edac/edac_pci_sysfs.c
29823@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
29824 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
29825 static int edac_pci_poll_msec = 1000; /* one second workq period */
29826
29827-static atomic_t pci_parity_count = ATOMIC_INIT(0);
29828-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
29829+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
29830+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
29831
29832 static struct kobject *edac_pci_top_main_kobj;
29833 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
29834@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29835 edac_printk(KERN_CRIT, EDAC_PCI,
29836 "Signaled System Error on %s\n",
29837 pci_name(dev));
29838- atomic_inc(&pci_nonparity_count);
29839+ atomic_inc_unchecked(&pci_nonparity_count);
29840 }
29841
29842 if (status & (PCI_STATUS_PARITY)) {
29843@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29844 "Master Data Parity Error on %s\n",
29845 pci_name(dev));
29846
29847- atomic_inc(&pci_parity_count);
29848+ atomic_inc_unchecked(&pci_parity_count);
29849 }
29850
29851 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29852@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29853 "Detected Parity Error on %s\n",
29854 pci_name(dev));
29855
29856- atomic_inc(&pci_parity_count);
29857+ atomic_inc_unchecked(&pci_parity_count);
29858 }
29859 }
29860
29861@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29862 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
29863 "Signaled System Error on %s\n",
29864 pci_name(dev));
29865- atomic_inc(&pci_nonparity_count);
29866+ atomic_inc_unchecked(&pci_nonparity_count);
29867 }
29868
29869 if (status & (PCI_STATUS_PARITY)) {
29870@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29871 "Master Data Parity Error on "
29872 "%s\n", pci_name(dev));
29873
29874- atomic_inc(&pci_parity_count);
29875+ atomic_inc_unchecked(&pci_parity_count);
29876 }
29877
29878 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29879@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29880 "Detected Parity Error on %s\n",
29881 pci_name(dev));
29882
29883- atomic_inc(&pci_parity_count);
29884+ atomic_inc_unchecked(&pci_parity_count);
29885 }
29886 }
29887 }
29888@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
29889 if (!check_pci_errors)
29890 return;
29891
29892- before_count = atomic_read(&pci_parity_count);
29893+ before_count = atomic_read_unchecked(&pci_parity_count);
29894
29895 /* scan all PCI devices looking for a Parity Error on devices and
29896 * bridges.
29897@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
29898 /* Only if operator has selected panic on PCI Error */
29899 if (edac_pci_get_panic_on_pe()) {
29900 /* If the count is different 'after' from 'before' */
29901- if (before_count != atomic_read(&pci_parity_count))
29902+ if (before_count != atomic_read_unchecked(&pci_parity_count))
29903 panic("EDAC: PCI Parity Error");
29904 }
29905 }
29906diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
29907index c6074c5..88a9e2e 100644
29908--- a/drivers/edac/mce_amd.h
29909+++ b/drivers/edac/mce_amd.h
29910@@ -82,7 +82,7 @@ extern const char * const ii_msgs[];
29911 struct amd_decoder_ops {
29912 bool (*dc_mce)(u16, u8);
29913 bool (*ic_mce)(u16, u8);
29914-};
29915+} __no_const;
29916
29917 void amd_report_gart_errors(bool);
29918 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
29919diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
29920index cc595eb..4ec702a 100644
29921--- a/drivers/firewire/core-card.c
29922+++ b/drivers/firewire/core-card.c
29923@@ -679,7 +679,7 @@ void fw_card_release(struct kref *kref)
29924
29925 void fw_core_remove_card(struct fw_card *card)
29926 {
29927- struct fw_card_driver dummy_driver = dummy_driver_template;
29928+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
29929
29930 card->driver->update_phy_reg(card, 4,
29931 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
29932diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
29933index 2e6b245..c3857d9 100644
29934--- a/drivers/firewire/core-cdev.c
29935+++ b/drivers/firewire/core-cdev.c
29936@@ -1341,8 +1341,7 @@ static int init_iso_resource(struct client *client,
29937 int ret;
29938
29939 if ((request->channels == 0 && request->bandwidth == 0) ||
29940- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
29941- request->bandwidth < 0)
29942+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
29943 return -EINVAL;
29944
29945 r = kmalloc(sizeof(*r), GFP_KERNEL);
29946diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
29947index dea2dcc..a4fb978 100644
29948--- a/drivers/firewire/core-transaction.c
29949+++ b/drivers/firewire/core-transaction.c
29950@@ -37,6 +37,7 @@
29951 #include <linux/timer.h>
29952 #include <linux/types.h>
29953 #include <linux/workqueue.h>
29954+#include <linux/sched.h>
29955
29956 #include <asm/byteorder.h>
29957
29958diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
29959index 9047f55..e47c7ff 100644
29960--- a/drivers/firewire/core.h
29961+++ b/drivers/firewire/core.h
29962@@ -110,6 +110,7 @@ struct fw_card_driver {
29963
29964 int (*stop_iso)(struct fw_iso_context *ctx);
29965 };
29966+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
29967
29968 void fw_card_initialize(struct fw_card *card,
29969 const struct fw_card_driver *driver, struct device *device);
29970diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
29971index 153980b..4b4d046 100644
29972--- a/drivers/firmware/dmi_scan.c
29973+++ b/drivers/firmware/dmi_scan.c
29974@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
29975 }
29976 }
29977 else {
29978- /*
29979- * no iounmap() for that ioremap(); it would be a no-op, but
29980- * it's so early in setup that sucker gets confused into doing
29981- * what it shouldn't if we actually call it.
29982- */
29983 p = dmi_ioremap(0xF0000, 0x10000);
29984 if (p == NULL)
29985 goto error;
29986@@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
29987 if (buf == NULL)
29988 return -1;
29989
29990- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
29991+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
29992
29993 iounmap(buf);
29994 return 0;
29995diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
29996index 82d5c20..44a7177 100644
29997--- a/drivers/gpio/gpio-vr41xx.c
29998+++ b/drivers/gpio/gpio-vr41xx.c
29999@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
30000 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
30001 maskl, pendl, maskh, pendh);
30002
30003- atomic_inc(&irq_err_count);
30004+ atomic_inc_unchecked(&irq_err_count);
30005
30006 return -EINVAL;
30007 }
30008diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
30009index 8111889..367b253 100644
30010--- a/drivers/gpu/drm/drm_crtc_helper.c
30011+++ b/drivers/gpu/drm/drm_crtc_helper.c
30012@@ -286,7 +286,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
30013 struct drm_crtc *tmp;
30014 int crtc_mask = 1;
30015
30016- WARN(!crtc, "checking null crtc?\n");
30017+ BUG_ON(!crtc);
30018
30019 dev = crtc->dev;
30020
30021diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
30022index 6116e3b..c29dd16 100644
30023--- a/drivers/gpu/drm/drm_drv.c
30024+++ b/drivers/gpu/drm/drm_drv.c
30025@@ -316,7 +316,7 @@ module_exit(drm_core_exit);
30026 /**
30027 * Copy and IOCTL return string to user space
30028 */
30029-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
30030+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
30031 {
30032 int len;
30033
30034@@ -399,7 +399,7 @@ long drm_ioctl(struct file *filp,
30035 return -ENODEV;
30036
30037 atomic_inc(&dev->ioctl_count);
30038- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
30039+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
30040 ++file_priv->ioctl_count;
30041
30042 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
30043diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
30044index 123de28..43a0897 100644
30045--- a/drivers/gpu/drm/drm_fops.c
30046+++ b/drivers/gpu/drm/drm_fops.c
30047@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
30048 }
30049
30050 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
30051- atomic_set(&dev->counts[i], 0);
30052+ atomic_set_unchecked(&dev->counts[i], 0);
30053
30054 dev->sigdata.lock = NULL;
30055
30056@@ -138,8 +138,8 @@ int drm_open(struct inode *inode, struct file *filp)
30057
30058 retcode = drm_open_helper(inode, filp, dev);
30059 if (!retcode) {
30060- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
30061- if (!dev->open_count++)
30062+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
30063+ if (local_inc_return(&dev->open_count) == 1)
30064 retcode = drm_setup(dev);
30065 }
30066 if (!retcode) {
30067@@ -482,7 +482,7 @@ int drm_release(struct inode *inode, struct file *filp)
30068
30069 mutex_lock(&drm_global_mutex);
30070
30071- DRM_DEBUG("open_count = %d\n", dev->open_count);
30072+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
30073
30074 if (dev->driver->preclose)
30075 dev->driver->preclose(dev, file_priv);
30076@@ -491,10 +491,10 @@ int drm_release(struct inode *inode, struct file *filp)
30077 * Begin inline drm_release
30078 */
30079
30080- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
30081+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
30082 task_pid_nr(current),
30083 (long)old_encode_dev(file_priv->minor->device),
30084- dev->open_count);
30085+ local_read(&dev->open_count));
30086
30087 /* Release any auth tokens that might point to this file_priv,
30088 (do that under the drm_global_mutex) */
30089@@ -584,8 +584,8 @@ int drm_release(struct inode *inode, struct file *filp)
30090 * End inline drm_release
30091 */
30092
30093- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
30094- if (!--dev->open_count) {
30095+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
30096+ if (local_dec_and_test(&dev->open_count)) {
30097 if (atomic_read(&dev->ioctl_count)) {
30098 DRM_ERROR("Device busy: %d\n",
30099 atomic_read(&dev->ioctl_count));
30100diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
30101index c87dc96..326055d 100644
30102--- a/drivers/gpu/drm/drm_global.c
30103+++ b/drivers/gpu/drm/drm_global.c
30104@@ -36,7 +36,7 @@
30105 struct drm_global_item {
30106 struct mutex mutex;
30107 void *object;
30108- int refcount;
30109+ atomic_t refcount;
30110 };
30111
30112 static struct drm_global_item glob[DRM_GLOBAL_NUM];
30113@@ -49,7 +49,7 @@ void drm_global_init(void)
30114 struct drm_global_item *item = &glob[i];
30115 mutex_init(&item->mutex);
30116 item->object = NULL;
30117- item->refcount = 0;
30118+ atomic_set(&item->refcount, 0);
30119 }
30120 }
30121
30122@@ -59,7 +59,7 @@ void drm_global_release(void)
30123 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
30124 struct drm_global_item *item = &glob[i];
30125 BUG_ON(item->object != NULL);
30126- BUG_ON(item->refcount != 0);
30127+ BUG_ON(atomic_read(&item->refcount) != 0);
30128 }
30129 }
30130
30131@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30132 void *object;
30133
30134 mutex_lock(&item->mutex);
30135- if (item->refcount == 0) {
30136+ if (atomic_read(&item->refcount) == 0) {
30137 item->object = kzalloc(ref->size, GFP_KERNEL);
30138 if (unlikely(item->object == NULL)) {
30139 ret = -ENOMEM;
30140@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30141 goto out_err;
30142
30143 }
30144- ++item->refcount;
30145+ atomic_inc(&item->refcount);
30146 ref->object = item->object;
30147 object = item->object;
30148 mutex_unlock(&item->mutex);
30149@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
30150 struct drm_global_item *item = &glob[ref->global_type];
30151
30152 mutex_lock(&item->mutex);
30153- BUG_ON(item->refcount == 0);
30154+ BUG_ON(atomic_read(&item->refcount) == 0);
30155 BUG_ON(ref->object != item->object);
30156- if (--item->refcount == 0) {
30157+ if (atomic_dec_and_test(&item->refcount)) {
30158 ref->release(ref);
30159 item->object = NULL;
30160 }
30161diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
30162index ab1162d..42587b2 100644
30163--- a/drivers/gpu/drm/drm_info.c
30164+++ b/drivers/gpu/drm/drm_info.c
30165@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
30166 struct drm_local_map *map;
30167 struct drm_map_list *r_list;
30168
30169- /* Hardcoded from _DRM_FRAME_BUFFER,
30170- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
30171- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
30172- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
30173+ static const char * const types[] = {
30174+ [_DRM_FRAME_BUFFER] = "FB",
30175+ [_DRM_REGISTERS] = "REG",
30176+ [_DRM_SHM] = "SHM",
30177+ [_DRM_AGP] = "AGP",
30178+ [_DRM_SCATTER_GATHER] = "SG",
30179+ [_DRM_CONSISTENT] = "PCI",
30180+ [_DRM_GEM] = "GEM" };
30181 const char *type;
30182 int i;
30183
30184@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
30185 map = r_list->map;
30186 if (!map)
30187 continue;
30188- if (map->type < 0 || map->type > 5)
30189+ if (map->type >= ARRAY_SIZE(types))
30190 type = "??";
30191 else
30192 type = types[map->type];
30193@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
30194 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
30195 vma->vm_flags & VM_LOCKED ? 'l' : '-',
30196 vma->vm_flags & VM_IO ? 'i' : '-',
30197+#ifdef CONFIG_GRKERNSEC_HIDESYM
30198+ 0);
30199+#else
30200 vma->vm_pgoff);
30201+#endif
30202
30203 #if defined(__i386__)
30204 pgprot = pgprot_val(vma->vm_page_prot);
30205diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
30206index 637fcc3..e890b33 100644
30207--- a/drivers/gpu/drm/drm_ioc32.c
30208+++ b/drivers/gpu/drm/drm_ioc32.c
30209@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
30210 request = compat_alloc_user_space(nbytes);
30211 if (!access_ok(VERIFY_WRITE, request, nbytes))
30212 return -EFAULT;
30213- list = (struct drm_buf_desc *) (request + 1);
30214+ list = (struct drm_buf_desc __user *) (request + 1);
30215
30216 if (__put_user(count, &request->count)
30217 || __put_user(list, &request->list))
30218@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
30219 request = compat_alloc_user_space(nbytes);
30220 if (!access_ok(VERIFY_WRITE, request, nbytes))
30221 return -EFAULT;
30222- list = (struct drm_buf_pub *) (request + 1);
30223+ list = (struct drm_buf_pub __user *) (request + 1);
30224
30225 if (__put_user(count, &request->count)
30226 || __put_user(list, &request->list))
30227diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
30228index cf85155..f2665cb 100644
30229--- a/drivers/gpu/drm/drm_ioctl.c
30230+++ b/drivers/gpu/drm/drm_ioctl.c
30231@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
30232 stats->data[i].value =
30233 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
30234 else
30235- stats->data[i].value = atomic_read(&dev->counts[i]);
30236+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
30237 stats->data[i].type = dev->types[i];
30238 }
30239
30240diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
30241index c79c713..2048588 100644
30242--- a/drivers/gpu/drm/drm_lock.c
30243+++ b/drivers/gpu/drm/drm_lock.c
30244@@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30245 if (drm_lock_take(&master->lock, lock->context)) {
30246 master->lock.file_priv = file_priv;
30247 master->lock.lock_time = jiffies;
30248- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
30249+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
30250 break; /* Got lock */
30251 }
30252
30253@@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30254 return -EINVAL;
30255 }
30256
30257- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
30258+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
30259
30260 if (drm_lock_free(&master->lock, lock->context)) {
30261 /* FIXME: Should really bail out here. */
30262diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
30263index aa454f8..6d38580 100644
30264--- a/drivers/gpu/drm/drm_stub.c
30265+++ b/drivers/gpu/drm/drm_stub.c
30266@@ -512,7 +512,7 @@ void drm_unplug_dev(struct drm_device *dev)
30267
30268 drm_device_set_unplugged(dev);
30269
30270- if (dev->open_count == 0) {
30271+ if (local_read(&dev->open_count) == 0) {
30272 drm_put_dev(dev);
30273 }
30274 mutex_unlock(&drm_global_mutex);
30275diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
30276index f920fb5..001c52d 100644
30277--- a/drivers/gpu/drm/i810/i810_dma.c
30278+++ b/drivers/gpu/drm/i810/i810_dma.c
30279@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
30280 dma->buflist[vertex->idx],
30281 vertex->discard, vertex->used);
30282
30283- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30284- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30285+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30286+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30287 sarea_priv->last_enqueue = dev_priv->counter - 1;
30288 sarea_priv->last_dispatch = (int)hw_status[5];
30289
30290@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
30291 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
30292 mc->last_render);
30293
30294- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30295- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30296+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30297+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30298 sarea_priv->last_enqueue = dev_priv->counter - 1;
30299 sarea_priv->last_dispatch = (int)hw_status[5];
30300
30301diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
30302index c9339f4..f5e1b9d 100644
30303--- a/drivers/gpu/drm/i810/i810_drv.h
30304+++ b/drivers/gpu/drm/i810/i810_drv.h
30305@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
30306 int page_flipping;
30307
30308 wait_queue_head_t irq_queue;
30309- atomic_t irq_received;
30310- atomic_t irq_emitted;
30311+ atomic_unchecked_t irq_received;
30312+ atomic_unchecked_t irq_emitted;
30313
30314 int front_offset;
30315 } drm_i810_private_t;
30316diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
30317index e6162a1..b2ff486 100644
30318--- a/drivers/gpu/drm/i915/i915_debugfs.c
30319+++ b/drivers/gpu/drm/i915/i915_debugfs.c
30320@@ -500,7 +500,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
30321 I915_READ(GTIMR));
30322 }
30323 seq_printf(m, "Interrupts received: %d\n",
30324- atomic_read(&dev_priv->irq_received));
30325+ atomic_read_unchecked(&dev_priv->irq_received));
30326 for (i = 0; i < I915_NUM_RINGS; i++) {
30327 if (IS_GEN6(dev) || IS_GEN7(dev)) {
30328 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
30329@@ -1313,7 +1313,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
30330 return ret;
30331
30332 if (opregion->header)
30333- seq_write(m, opregion->header, OPREGION_SIZE);
30334+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
30335
30336 mutex_unlock(&dev->struct_mutex);
30337
30338diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
30339index ba60f3c..e2dff7f 100644
30340--- a/drivers/gpu/drm/i915/i915_dma.c
30341+++ b/drivers/gpu/drm/i915/i915_dma.c
30342@@ -1178,7 +1178,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
30343 bool can_switch;
30344
30345 spin_lock(&dev->count_lock);
30346- can_switch = (dev->open_count == 0);
30347+ can_switch = (local_read(&dev->open_count) == 0);
30348 spin_unlock(&dev->count_lock);
30349 return can_switch;
30350 }
30351diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
30352index 5fabc6c..0b08aa1 100644
30353--- a/drivers/gpu/drm/i915/i915_drv.h
30354+++ b/drivers/gpu/drm/i915/i915_drv.h
30355@@ -240,7 +240,7 @@ struct drm_i915_display_funcs {
30356 /* render clock increase/decrease */
30357 /* display clock increase/decrease */
30358 /* pll clock increase/decrease */
30359-};
30360+} __no_const;
30361
30362 struct intel_device_info {
30363 u8 gen;
30364@@ -350,7 +350,7 @@ typedef struct drm_i915_private {
30365 int current_page;
30366 int page_flipping;
30367
30368- atomic_t irq_received;
30369+ atomic_unchecked_t irq_received;
30370
30371 /* protects the irq masks */
30372 spinlock_t irq_lock;
30373@@ -937,7 +937,7 @@ struct drm_i915_gem_object {
30374 * will be page flipped away on the next vblank. When it
30375 * reaches 0, dev_priv->pending_flip_queue will be woken up.
30376 */
30377- atomic_t pending_flip;
30378+ atomic_unchecked_t pending_flip;
30379 };
30380
30381 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
30382@@ -1359,7 +1359,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
30383 extern void intel_teardown_gmbus(struct drm_device *dev);
30384 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
30385 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
30386-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30387+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30388 {
30389 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
30390 }
30391diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30392index de43194..a14c4cc 100644
30393--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30394+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30395@@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
30396 i915_gem_clflush_object(obj);
30397
30398 if (obj->base.pending_write_domain)
30399- cd->flips |= atomic_read(&obj->pending_flip);
30400+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
30401
30402 /* The actual obj->write_domain will be updated with
30403 * pending_write_domain after we emit the accumulated flush for all
30404@@ -933,9 +933,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
30405
30406 static int
30407 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
30408- int count)
30409+ unsigned int count)
30410 {
30411- int i;
30412+ unsigned int i;
30413
30414 for (i = 0; i < count; i++) {
30415 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
30416diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
30417index f57e5cf..c82f79d 100644
30418--- a/drivers/gpu/drm/i915/i915_irq.c
30419+++ b/drivers/gpu/drm/i915/i915_irq.c
30420@@ -472,7 +472,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
30421 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
30422 struct drm_i915_master_private *master_priv;
30423
30424- atomic_inc(&dev_priv->irq_received);
30425+ atomic_inc_unchecked(&dev_priv->irq_received);
30426
30427 /* disable master interrupt before clearing iir */
30428 de_ier = I915_READ(DEIER);
30429@@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
30430 struct drm_i915_master_private *master_priv;
30431 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
30432
30433- atomic_inc(&dev_priv->irq_received);
30434+ atomic_inc_unchecked(&dev_priv->irq_received);
30435
30436 if (IS_GEN6(dev))
30437 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
30438@@ -1292,7 +1292,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
30439 int ret = IRQ_NONE, pipe;
30440 bool blc_event = false;
30441
30442- atomic_inc(&dev_priv->irq_received);
30443+ atomic_inc_unchecked(&dev_priv->irq_received);
30444
30445 iir = I915_READ(IIR);
30446
30447@@ -1803,7 +1803,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
30448 {
30449 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30450
30451- atomic_set(&dev_priv->irq_received, 0);
30452+ atomic_set_unchecked(&dev_priv->irq_received, 0);
30453
30454 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30455 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30456@@ -1980,7 +1980,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
30457 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30458 int pipe;
30459
30460- atomic_set(&dev_priv->irq_received, 0);
30461+ atomic_set_unchecked(&dev_priv->irq_received, 0);
30462
30463 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30464 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30465diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
30466index d4d162f..e80037c 100644
30467--- a/drivers/gpu/drm/i915/intel_display.c
30468+++ b/drivers/gpu/drm/i915/intel_display.c
30469@@ -2254,7 +2254,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
30470
30471 wait_event(dev_priv->pending_flip_queue,
30472 atomic_read(&dev_priv->mm.wedged) ||
30473- atomic_read(&obj->pending_flip) == 0);
30474+ atomic_read_unchecked(&obj->pending_flip) == 0);
30475
30476 /* Big Hammer, we also need to ensure that any pending
30477 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
30478@@ -2919,7 +2919,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
30479 obj = to_intel_framebuffer(crtc->fb)->obj;
30480 dev_priv = crtc->dev->dev_private;
30481 wait_event(dev_priv->pending_flip_queue,
30482- atomic_read(&obj->pending_flip) == 0);
30483+ atomic_read_unchecked(&obj->pending_flip) == 0);
30484 }
30485
30486 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
30487@@ -7286,7 +7286,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
30488
30489 atomic_clear_mask(1 << intel_crtc->plane,
30490 &obj->pending_flip.counter);
30491- if (atomic_read(&obj->pending_flip) == 0)
30492+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
30493 wake_up(&dev_priv->pending_flip_queue);
30494
30495 schedule_work(&work->work);
30496@@ -7582,7 +7582,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30497 /* Block clients from rendering to the new back buffer until
30498 * the flip occurs and the object is no longer visible.
30499 */
30500- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30501+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30502
30503 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
30504 if (ret)
30505@@ -7596,7 +7596,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30506 return 0;
30507
30508 cleanup_pending:
30509- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30510+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30511 drm_gem_object_unreference(&work->old_fb_obj->base);
30512 drm_gem_object_unreference(&obj->base);
30513 mutex_unlock(&dev->struct_mutex);
30514diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
30515index 54558a0..2d97005 100644
30516--- a/drivers/gpu/drm/mga/mga_drv.h
30517+++ b/drivers/gpu/drm/mga/mga_drv.h
30518@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
30519 u32 clear_cmd;
30520 u32 maccess;
30521
30522- atomic_t vbl_received; /**< Number of vblanks received. */
30523+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
30524 wait_queue_head_t fence_queue;
30525- atomic_t last_fence_retired;
30526+ atomic_unchecked_t last_fence_retired;
30527 u32 next_fence_to_post;
30528
30529 unsigned int fb_cpp;
30530diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
30531index 2581202..f230a8d9 100644
30532--- a/drivers/gpu/drm/mga/mga_irq.c
30533+++ b/drivers/gpu/drm/mga/mga_irq.c
30534@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
30535 if (crtc != 0)
30536 return 0;
30537
30538- return atomic_read(&dev_priv->vbl_received);
30539+ return atomic_read_unchecked(&dev_priv->vbl_received);
30540 }
30541
30542
30543@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30544 /* VBLANK interrupt */
30545 if (status & MGA_VLINEPEN) {
30546 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
30547- atomic_inc(&dev_priv->vbl_received);
30548+ atomic_inc_unchecked(&dev_priv->vbl_received);
30549 drm_handle_vblank(dev, 0);
30550 handled = 1;
30551 }
30552@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30553 if ((prim_start & ~0x03) != (prim_end & ~0x03))
30554 MGA_WRITE(MGA_PRIMEND, prim_end);
30555
30556- atomic_inc(&dev_priv->last_fence_retired);
30557+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
30558 DRM_WAKEUP(&dev_priv->fence_queue);
30559 handled = 1;
30560 }
30561@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
30562 * using fences.
30563 */
30564 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
30565- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
30566+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
30567 - *sequence) <= (1 << 23)));
30568
30569 *sequence = cur_fence;
30570diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
30571index 0be4a81..7464804 100644
30572--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
30573+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
30574@@ -5329,7 +5329,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
30575 struct bit_table {
30576 const char id;
30577 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
30578-};
30579+} __no_const;
30580
30581 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
30582
30583diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
30584index 3aef353..0ad1322 100644
30585--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
30586+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
30587@@ -240,7 +240,7 @@ struct nouveau_channel {
30588 struct list_head pending;
30589 uint32_t sequence;
30590 uint32_t sequence_ack;
30591- atomic_t last_sequence_irq;
30592+ atomic_unchecked_t last_sequence_irq;
30593 struct nouveau_vma vma;
30594 } fence;
30595
30596@@ -321,7 +321,7 @@ struct nouveau_exec_engine {
30597 u32 handle, u16 class);
30598 void (*set_tile_region)(struct drm_device *dev, int i);
30599 void (*tlb_flush)(struct drm_device *, int engine);
30600-};
30601+} __no_const;
30602
30603 struct nouveau_instmem_engine {
30604 void *priv;
30605@@ -343,13 +343,13 @@ struct nouveau_instmem_engine {
30606 struct nouveau_mc_engine {
30607 int (*init)(struct drm_device *dev);
30608 void (*takedown)(struct drm_device *dev);
30609-};
30610+} __no_const;
30611
30612 struct nouveau_timer_engine {
30613 int (*init)(struct drm_device *dev);
30614 void (*takedown)(struct drm_device *dev);
30615 uint64_t (*read)(struct drm_device *dev);
30616-};
30617+} __no_const;
30618
30619 struct nouveau_fb_engine {
30620 int num_tiles;
30621@@ -590,7 +590,7 @@ struct nouveau_vram_engine {
30622 void (*put)(struct drm_device *, struct nouveau_mem **);
30623
30624 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
30625-};
30626+} __no_const;
30627
30628 struct nouveau_engine {
30629 struct nouveau_instmem_engine instmem;
30630@@ -739,7 +739,7 @@ struct drm_nouveau_private {
30631 struct drm_global_reference mem_global_ref;
30632 struct ttm_bo_global_ref bo_global_ref;
30633 struct ttm_bo_device bdev;
30634- atomic_t validate_sequence;
30635+ atomic_unchecked_t validate_sequence;
30636 } ttm;
30637
30638 struct {
30639diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
30640index c1dc20f..4df673c 100644
30641--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
30642+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
30643@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
30644 if (USE_REFCNT(dev))
30645 sequence = nvchan_rd32(chan, 0x48);
30646 else
30647- sequence = atomic_read(&chan->fence.last_sequence_irq);
30648+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
30649
30650 if (chan->fence.sequence_ack == sequence)
30651 goto out;
30652@@ -538,7 +538,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
30653 return ret;
30654 }
30655
30656- atomic_set(&chan->fence.last_sequence_irq, 0);
30657+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
30658 return 0;
30659 }
30660
30661diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
30662index ed52a6f..484acdc 100644
30663--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
30664+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
30665@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
30666 int trycnt = 0;
30667 int ret, i;
30668
30669- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
30670+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
30671 retry:
30672 if (++trycnt > 100000) {
30673 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
30674diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
30675index c2a8511..4b996f9 100644
30676--- a/drivers/gpu/drm/nouveau/nouveau_state.c
30677+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
30678@@ -588,7 +588,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
30679 bool can_switch;
30680
30681 spin_lock(&dev->count_lock);
30682- can_switch = (dev->open_count == 0);
30683+ can_switch = (local_read(&dev->open_count) == 0);
30684 spin_unlock(&dev->count_lock);
30685 return can_switch;
30686 }
30687diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
30688index dbdea8e..cd6eeeb 100644
30689--- a/drivers/gpu/drm/nouveau/nv04_graph.c
30690+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
30691@@ -554,7 +554,7 @@ static int
30692 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
30693 u32 class, u32 mthd, u32 data)
30694 {
30695- atomic_set(&chan->fence.last_sequence_irq, data);
30696+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
30697 return 0;
30698 }
30699
30700diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
30701index 2746402..c8dc4a4 100644
30702--- a/drivers/gpu/drm/nouveau/nv50_sor.c
30703+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
30704@@ -304,7 +304,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
30705 }
30706
30707 if (nv_encoder->dcb->type == OUTPUT_DP) {
30708- struct dp_train_func func = {
30709+ static struct dp_train_func func = {
30710 .link_set = nv50_sor_dp_link_set,
30711 .train_set = nv50_sor_dp_train_set,
30712 .train_adj = nv50_sor_dp_train_adj
30713diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
30714index 0247250..d2f6aaf 100644
30715--- a/drivers/gpu/drm/nouveau/nvd0_display.c
30716+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
30717@@ -1366,7 +1366,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
30718 nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
30719
30720 if (nv_encoder->dcb->type == OUTPUT_DP) {
30721- struct dp_train_func func = {
30722+ static struct dp_train_func func = {
30723 .link_set = nvd0_sor_dp_link_set,
30724 .train_set = nvd0_sor_dp_train_set,
30725 .train_adj = nvd0_sor_dp_train_adj
30726diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
30727index bcac90b..53bfc76 100644
30728--- a/drivers/gpu/drm/r128/r128_cce.c
30729+++ b/drivers/gpu/drm/r128/r128_cce.c
30730@@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
30731
30732 /* GH: Simple idle check.
30733 */
30734- atomic_set(&dev_priv->idle_count, 0);
30735+ atomic_set_unchecked(&dev_priv->idle_count, 0);
30736
30737 /* We don't support anything other than bus-mastering ring mode,
30738 * but the ring can be in either AGP or PCI space for the ring
30739diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
30740index 930c71b..499aded 100644
30741--- a/drivers/gpu/drm/r128/r128_drv.h
30742+++ b/drivers/gpu/drm/r128/r128_drv.h
30743@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
30744 int is_pci;
30745 unsigned long cce_buffers_offset;
30746
30747- atomic_t idle_count;
30748+ atomic_unchecked_t idle_count;
30749
30750 int page_flipping;
30751 int current_page;
30752 u32 crtc_offset;
30753 u32 crtc_offset_cntl;
30754
30755- atomic_t vbl_received;
30756+ atomic_unchecked_t vbl_received;
30757
30758 u32 color_fmt;
30759 unsigned int front_offset;
30760diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
30761index 429d5a0..7e899ed 100644
30762--- a/drivers/gpu/drm/r128/r128_irq.c
30763+++ b/drivers/gpu/drm/r128/r128_irq.c
30764@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
30765 if (crtc != 0)
30766 return 0;
30767
30768- return atomic_read(&dev_priv->vbl_received);
30769+ return atomic_read_unchecked(&dev_priv->vbl_received);
30770 }
30771
30772 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30773@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30774 /* VBLANK interrupt */
30775 if (status & R128_CRTC_VBLANK_INT) {
30776 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
30777- atomic_inc(&dev_priv->vbl_received);
30778+ atomic_inc_unchecked(&dev_priv->vbl_received);
30779 drm_handle_vblank(dev, 0);
30780 return IRQ_HANDLED;
30781 }
30782diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
30783index a9e33ce..09edd4b 100644
30784--- a/drivers/gpu/drm/r128/r128_state.c
30785+++ b/drivers/gpu/drm/r128/r128_state.c
30786@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
30787
30788 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
30789 {
30790- if (atomic_read(&dev_priv->idle_count) == 0)
30791+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
30792 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
30793 else
30794- atomic_set(&dev_priv->idle_count, 0);
30795+ atomic_set_unchecked(&dev_priv->idle_count, 0);
30796 }
30797
30798 #endif
30799diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
30800index 5a82b6b..9e69c73 100644
30801--- a/drivers/gpu/drm/radeon/mkregtable.c
30802+++ b/drivers/gpu/drm/radeon/mkregtable.c
30803@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
30804 regex_t mask_rex;
30805 regmatch_t match[4];
30806 char buf[1024];
30807- size_t end;
30808+ long end;
30809 int len;
30810 int done = 0;
30811 int r;
30812 unsigned o;
30813 struct offset *offset;
30814 char last_reg_s[10];
30815- int last_reg;
30816+ unsigned long last_reg;
30817
30818 if (regcomp
30819 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
30820diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
30821index 138b952..d74f9cb 100644
30822--- a/drivers/gpu/drm/radeon/radeon.h
30823+++ b/drivers/gpu/drm/radeon/radeon.h
30824@@ -253,7 +253,7 @@ struct radeon_fence_driver {
30825 uint32_t scratch_reg;
30826 uint64_t gpu_addr;
30827 volatile uint32_t *cpu_addr;
30828- atomic_t seq;
30829+ atomic_unchecked_t seq;
30830 uint32_t last_seq;
30831 unsigned long last_jiffies;
30832 unsigned long last_timeout;
30833@@ -753,7 +753,7 @@ struct r600_blit_cp_primitives {
30834 int x2, int y2);
30835 void (*draw_auto)(struct radeon_device *rdev);
30836 void (*set_default_state)(struct radeon_device *rdev);
30837-};
30838+} __no_const;
30839
30840 struct r600_blit {
30841 struct mutex mutex;
30842@@ -1246,7 +1246,7 @@ struct radeon_asic {
30843 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
30844 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
30845 } pflip;
30846-};
30847+} __no_const;
30848
30849 /*
30850 * Asic structures
30851diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
30852index 5992502..c19c633 100644
30853--- a/drivers/gpu/drm/radeon/radeon_device.c
30854+++ b/drivers/gpu/drm/radeon/radeon_device.c
30855@@ -691,7 +691,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
30856 bool can_switch;
30857
30858 spin_lock(&dev->count_lock);
30859- can_switch = (dev->open_count == 0);
30860+ can_switch = (local_read(&dev->open_count) == 0);
30861 spin_unlock(&dev->count_lock);
30862 return can_switch;
30863 }
30864diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
30865index a1b59ca..86f2d44 100644
30866--- a/drivers/gpu/drm/radeon/radeon_drv.h
30867+++ b/drivers/gpu/drm/radeon/radeon_drv.h
30868@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
30869
30870 /* SW interrupt */
30871 wait_queue_head_t swi_queue;
30872- atomic_t swi_emitted;
30873+ atomic_unchecked_t swi_emitted;
30874 int vblank_crtc;
30875 uint32_t irq_enable_reg;
30876 uint32_t r500_disp_irq_reg;
30877diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
30878index 4bd36a3..e66fe9c 100644
30879--- a/drivers/gpu/drm/radeon/radeon_fence.c
30880+++ b/drivers/gpu/drm/radeon/radeon_fence.c
30881@@ -70,7 +70,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
30882 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
30883 return 0;
30884 }
30885- fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
30886+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv[fence->ring].seq);
30887 if (!rdev->ring[fence->ring].ready)
30888 /* FIXME: cp is not running assume everythings is done right
30889 * away
30890@@ -405,7 +405,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
30891 }
30892 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
30893 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
30894- radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
30895+ radeon_fence_write(rdev, atomic_read_unchecked(&rdev->fence_drv[ring].seq), ring);
30896 rdev->fence_drv[ring].initialized = true;
30897 DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
30898 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
30899@@ -418,7 +418,7 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
30900 rdev->fence_drv[ring].scratch_reg = -1;
30901 rdev->fence_drv[ring].cpu_addr = NULL;
30902 rdev->fence_drv[ring].gpu_addr = 0;
30903- atomic_set(&rdev->fence_drv[ring].seq, 0);
30904+ atomic_set_unchecked(&rdev->fence_drv[ring].seq, 0);
30905 INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
30906 INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
30907 INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
30908diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
30909index 48b7cea..342236f 100644
30910--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
30911+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
30912@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
30913 request = compat_alloc_user_space(sizeof(*request));
30914 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
30915 || __put_user(req32.param, &request->param)
30916- || __put_user((void __user *)(unsigned long)req32.value,
30917+ || __put_user((unsigned long)req32.value,
30918 &request->value))
30919 return -EFAULT;
30920
30921diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
30922index 00da384..32f972d 100644
30923--- a/drivers/gpu/drm/radeon/radeon_irq.c
30924+++ b/drivers/gpu/drm/radeon/radeon_irq.c
30925@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
30926 unsigned int ret;
30927 RING_LOCALS;
30928
30929- atomic_inc(&dev_priv->swi_emitted);
30930- ret = atomic_read(&dev_priv->swi_emitted);
30931+ atomic_inc_unchecked(&dev_priv->swi_emitted);
30932+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
30933
30934 BEGIN_RING(4);
30935 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
30936@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
30937 drm_radeon_private_t *dev_priv =
30938 (drm_radeon_private_t *) dev->dev_private;
30939
30940- atomic_set(&dev_priv->swi_emitted, 0);
30941+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
30942 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
30943
30944 dev->max_vblank_count = 0x001fffff;
30945diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
30946index e8422ae..d22d4a8 100644
30947--- a/drivers/gpu/drm/radeon/radeon_state.c
30948+++ b/drivers/gpu/drm/radeon/radeon_state.c
30949@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
30950 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
30951 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
30952
30953- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30954+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30955 sarea_priv->nbox * sizeof(depth_boxes[0])))
30956 return -EFAULT;
30957
30958@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
30959 {
30960 drm_radeon_private_t *dev_priv = dev->dev_private;
30961 drm_radeon_getparam_t *param = data;
30962- int value;
30963+ int value = 0;
30964
30965 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
30966
30967diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
30968index f493c64..524ab6b 100644
30969--- a/drivers/gpu/drm/radeon/radeon_ttm.c
30970+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
30971@@ -843,8 +843,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
30972 }
30973 if (unlikely(ttm_vm_ops == NULL)) {
30974 ttm_vm_ops = vma->vm_ops;
30975- radeon_ttm_vm_ops = *ttm_vm_ops;
30976- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30977+ pax_open_kernel();
30978+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
30979+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30980+ pax_close_kernel();
30981 }
30982 vma->vm_ops = &radeon_ttm_vm_ops;
30983 return 0;
30984diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
30985index f2c3b9d..d5a376b 100644
30986--- a/drivers/gpu/drm/radeon/rs690.c
30987+++ b/drivers/gpu/drm/radeon/rs690.c
30988@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
30989 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
30990 rdev->pm.sideport_bandwidth.full)
30991 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
30992- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
30993+ read_delay_latency.full = dfixed_const(800 * 1000);
30994 read_delay_latency.full = dfixed_div(read_delay_latency,
30995 rdev->pm.igp_sideport_mclk);
30996+ a.full = dfixed_const(370);
30997+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
30998 } else {
30999 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
31000 rdev->pm.k8_bandwidth.full)
31001diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
31002index ebc6fac..a8313ed 100644
31003--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
31004+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
31005@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
31006 static int ttm_pool_mm_shrink(struct shrinker *shrink,
31007 struct shrink_control *sc)
31008 {
31009- static atomic_t start_pool = ATOMIC_INIT(0);
31010+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
31011 unsigned i;
31012- unsigned pool_offset = atomic_add_return(1, &start_pool);
31013+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
31014 struct ttm_page_pool *pool;
31015 int shrink_pages = sc->nr_to_scan;
31016
31017diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
31018index 88edacc..1e5412b 100644
31019--- a/drivers/gpu/drm/via/via_drv.h
31020+++ b/drivers/gpu/drm/via/via_drv.h
31021@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
31022 typedef uint32_t maskarray_t[5];
31023
31024 typedef struct drm_via_irq {
31025- atomic_t irq_received;
31026+ atomic_unchecked_t irq_received;
31027 uint32_t pending_mask;
31028 uint32_t enable_mask;
31029 wait_queue_head_t irq_queue;
31030@@ -75,7 +75,7 @@ typedef struct drm_via_private {
31031 struct timeval last_vblank;
31032 int last_vblank_valid;
31033 unsigned usec_per_vblank;
31034- atomic_t vbl_received;
31035+ atomic_unchecked_t vbl_received;
31036 drm_via_state_t hc_state;
31037 char pci_buf[VIA_PCI_BUF_SIZE];
31038 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
31039diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
31040index d391f48..10c8ca3 100644
31041--- a/drivers/gpu/drm/via/via_irq.c
31042+++ b/drivers/gpu/drm/via/via_irq.c
31043@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
31044 if (crtc != 0)
31045 return 0;
31046
31047- return atomic_read(&dev_priv->vbl_received);
31048+ return atomic_read_unchecked(&dev_priv->vbl_received);
31049 }
31050
31051 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31052@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31053
31054 status = VIA_READ(VIA_REG_INTERRUPT);
31055 if (status & VIA_IRQ_VBLANK_PENDING) {
31056- atomic_inc(&dev_priv->vbl_received);
31057- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
31058+ atomic_inc_unchecked(&dev_priv->vbl_received);
31059+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
31060 do_gettimeofday(&cur_vblank);
31061 if (dev_priv->last_vblank_valid) {
31062 dev_priv->usec_per_vblank =
31063@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31064 dev_priv->last_vblank = cur_vblank;
31065 dev_priv->last_vblank_valid = 1;
31066 }
31067- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
31068+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
31069 DRM_DEBUG("US per vblank is: %u\n",
31070 dev_priv->usec_per_vblank);
31071 }
31072@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31073
31074 for (i = 0; i < dev_priv->num_irqs; ++i) {
31075 if (status & cur_irq->pending_mask) {
31076- atomic_inc(&cur_irq->irq_received);
31077+ atomic_inc_unchecked(&cur_irq->irq_received);
31078 DRM_WAKEUP(&cur_irq->irq_queue);
31079 handled = 1;
31080 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
31081@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
31082 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31083 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
31084 masks[irq][4]));
31085- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
31086+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
31087 } else {
31088 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31089 (((cur_irq_sequence =
31090- atomic_read(&cur_irq->irq_received)) -
31091+ atomic_read_unchecked(&cur_irq->irq_received)) -
31092 *sequence) <= (1 << 23)));
31093 }
31094 *sequence = cur_irq_sequence;
31095@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
31096 }
31097
31098 for (i = 0; i < dev_priv->num_irqs; ++i) {
31099- atomic_set(&cur_irq->irq_received, 0);
31100+ atomic_set_unchecked(&cur_irq->irq_received, 0);
31101 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
31102 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
31103 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
31104@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
31105 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
31106 case VIA_IRQ_RELATIVE:
31107 irqwait->request.sequence +=
31108- atomic_read(&cur_irq->irq_received);
31109+ atomic_read_unchecked(&cur_irq->irq_received);
31110 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
31111 case VIA_IRQ_ABSOLUTE:
31112 break;
31113diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31114index d0f2c07..9ebd9c3 100644
31115--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31116+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31117@@ -263,7 +263,7 @@ struct vmw_private {
31118 * Fencing and IRQs.
31119 */
31120
31121- atomic_t marker_seq;
31122+ atomic_unchecked_t marker_seq;
31123 wait_queue_head_t fence_queue;
31124 wait_queue_head_t fifo_queue;
31125 int fence_queue_waiters; /* Protected by hw_mutex */
31126diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31127index a0c2f12..68ae6cb 100644
31128--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31129+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31130@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
31131 (unsigned int) min,
31132 (unsigned int) fifo->capabilities);
31133
31134- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31135+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31136 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
31137 vmw_marker_queue_init(&fifo->marker_queue);
31138 return vmw_fifo_send_fence(dev_priv, &dummy);
31139@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
31140 if (reserveable)
31141 iowrite32(bytes, fifo_mem +
31142 SVGA_FIFO_RESERVED);
31143- return fifo_mem + (next_cmd >> 2);
31144+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
31145 } else {
31146 need_bounce = true;
31147 }
31148@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31149
31150 fm = vmw_fifo_reserve(dev_priv, bytes);
31151 if (unlikely(fm == NULL)) {
31152- *seqno = atomic_read(&dev_priv->marker_seq);
31153+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31154 ret = -ENOMEM;
31155 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
31156 false, 3*HZ);
31157@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31158 }
31159
31160 do {
31161- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
31162+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
31163 } while (*seqno == 0);
31164
31165 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
31166diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31167index cabc95f..14b3d77 100644
31168--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31169+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31170@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
31171 * emitted. Then the fence is stale and signaled.
31172 */
31173
31174- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
31175+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
31176 > VMW_FENCE_WRAP);
31177
31178 return ret;
31179@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
31180
31181 if (fifo_idle)
31182 down_read(&fifo_state->rwsem);
31183- signal_seq = atomic_read(&dev_priv->marker_seq);
31184+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
31185 ret = 0;
31186
31187 for (;;) {
31188diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31189index 8a8725c..afed796 100644
31190--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31191+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31192@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
31193 while (!vmw_lag_lt(queue, us)) {
31194 spin_lock(&queue->lock);
31195 if (list_empty(&queue->head))
31196- seqno = atomic_read(&dev_priv->marker_seq);
31197+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31198 else {
31199 marker = list_first_entry(&queue->head,
31200 struct vmw_marker, head);
31201diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
31202index 4da66b4..e948655 100644
31203--- a/drivers/hid/hid-core.c
31204+++ b/drivers/hid/hid-core.c
31205@@ -2063,7 +2063,7 @@ static bool hid_ignore(struct hid_device *hdev)
31206
31207 int hid_add_device(struct hid_device *hdev)
31208 {
31209- static atomic_t id = ATOMIC_INIT(0);
31210+ static atomic_unchecked_t id = ATOMIC_INIT(0);
31211 int ret;
31212
31213 if (WARN_ON(hdev->status & HID_STAT_ADDED))
31214@@ -2078,7 +2078,7 @@ int hid_add_device(struct hid_device *hdev)
31215 /* XXX hack, any other cleaner solution after the driver core
31216 * is converted to allow more than 20 bytes as the device name? */
31217 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
31218- hdev->vendor, hdev->product, atomic_inc_return(&id));
31219+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
31220
31221 hid_debug_register(hdev, dev_name(&hdev->dev));
31222 ret = device_add(&hdev->dev);
31223diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
31224index eec3291..8ed706b 100644
31225--- a/drivers/hid/hid-wiimote-debug.c
31226+++ b/drivers/hid/hid-wiimote-debug.c
31227@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
31228 else if (size == 0)
31229 return -EIO;
31230
31231- if (copy_to_user(u, buf, size))
31232+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
31233 return -EFAULT;
31234
31235 *off += size;
31236diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
31237index b1ec0e2..c295a61 100644
31238--- a/drivers/hid/usbhid/hiddev.c
31239+++ b/drivers/hid/usbhid/hiddev.c
31240@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31241 break;
31242
31243 case HIDIOCAPPLICATION:
31244- if (arg < 0 || arg >= hid->maxapplication)
31245+ if (arg >= hid->maxapplication)
31246 break;
31247
31248 for (i = 0; i < hid->maxcollection; i++)
31249diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
31250index 4065374..10ed7dc 100644
31251--- a/drivers/hv/channel.c
31252+++ b/drivers/hv/channel.c
31253@@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
31254 int ret = 0;
31255 int t;
31256
31257- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31258- atomic_inc(&vmbus_connection.next_gpadl_handle);
31259+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31260+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31261
31262 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31263 if (ret)
31264diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
31265index 15956bd..ea34398 100644
31266--- a/drivers/hv/hv.c
31267+++ b/drivers/hv/hv.c
31268@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
31269 u64 output_address = (output) ? virt_to_phys(output) : 0;
31270 u32 output_address_hi = output_address >> 32;
31271 u32 output_address_lo = output_address & 0xFFFFFFFF;
31272- void *hypercall_page = hv_context.hypercall_page;
31273+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
31274
31275 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
31276 "=a"(hv_status_lo) : "d" (control_hi),
31277diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
31278index 699f0d8..f4f19250 100644
31279--- a/drivers/hv/hyperv_vmbus.h
31280+++ b/drivers/hv/hyperv_vmbus.h
31281@@ -555,7 +555,7 @@ enum vmbus_connect_state {
31282 struct vmbus_connection {
31283 enum vmbus_connect_state conn_state;
31284
31285- atomic_t next_gpadl_handle;
31286+ atomic_unchecked_t next_gpadl_handle;
31287
31288 /*
31289 * Represents channel interrupts. Each bit position represents a
31290diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
31291index a220e57..428f54d 100644
31292--- a/drivers/hv/vmbus_drv.c
31293+++ b/drivers/hv/vmbus_drv.c
31294@@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
31295 {
31296 int ret = 0;
31297
31298- static atomic_t device_num = ATOMIC_INIT(0);
31299+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
31300
31301 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
31302- atomic_inc_return(&device_num));
31303+ atomic_inc_return_unchecked(&device_num));
31304
31305 child_device_obj->device.bus = &hv_bus;
31306 child_device_obj->device.parent = &hv_acpi_dev->dev;
31307diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
31308index 9140236..ceaef4e 100644
31309--- a/drivers/hwmon/acpi_power_meter.c
31310+++ b/drivers/hwmon/acpi_power_meter.c
31311@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
31312 return res;
31313
31314 temp /= 1000;
31315- if (temp < 0)
31316- return -EINVAL;
31317
31318 mutex_lock(&resource->lock);
31319 resource->trip[attr->index - 7] = temp;
31320diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
31321index 8b011d0..3de24a1 100644
31322--- a/drivers/hwmon/sht15.c
31323+++ b/drivers/hwmon/sht15.c
31324@@ -166,7 +166,7 @@ struct sht15_data {
31325 int supply_uV;
31326 bool supply_uV_valid;
31327 struct work_struct update_supply_work;
31328- atomic_t interrupt_handled;
31329+ atomic_unchecked_t interrupt_handled;
31330 };
31331
31332 /**
31333@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
31334 return ret;
31335
31336 gpio_direction_input(data->pdata->gpio_data);
31337- atomic_set(&data->interrupt_handled, 0);
31338+ atomic_set_unchecked(&data->interrupt_handled, 0);
31339
31340 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31341 if (gpio_get_value(data->pdata->gpio_data) == 0) {
31342 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
31343 /* Only relevant if the interrupt hasn't occurred. */
31344- if (!atomic_read(&data->interrupt_handled))
31345+ if (!atomic_read_unchecked(&data->interrupt_handled))
31346 schedule_work(&data->read_work);
31347 }
31348 ret = wait_event_timeout(data->wait_queue,
31349@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
31350
31351 /* First disable the interrupt */
31352 disable_irq_nosync(irq);
31353- atomic_inc(&data->interrupt_handled);
31354+ atomic_inc_unchecked(&data->interrupt_handled);
31355 /* Then schedule a reading work struct */
31356 if (data->state != SHT15_READING_NOTHING)
31357 schedule_work(&data->read_work);
31358@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
31359 * If not, then start the interrupt again - care here as could
31360 * have gone low in meantime so verify it hasn't!
31361 */
31362- atomic_set(&data->interrupt_handled, 0);
31363+ atomic_set_unchecked(&data->interrupt_handled, 0);
31364 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31365 /* If still not occurred or another handler was scheduled */
31366 if (gpio_get_value(data->pdata->gpio_data)
31367- || atomic_read(&data->interrupt_handled))
31368+ || atomic_read_unchecked(&data->interrupt_handled))
31369 return;
31370 }
31371
31372diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
31373index 378fcb5..5e91fa8 100644
31374--- a/drivers/i2c/busses/i2c-amd756-s4882.c
31375+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
31376@@ -43,7 +43,7 @@
31377 extern struct i2c_adapter amd756_smbus;
31378
31379 static struct i2c_adapter *s4882_adapter;
31380-static struct i2c_algorithm *s4882_algo;
31381+static i2c_algorithm_no_const *s4882_algo;
31382
31383 /* Wrapper access functions for multiplexed SMBus */
31384 static DEFINE_MUTEX(amd756_lock);
31385diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
31386index 29015eb..af2d8e9 100644
31387--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
31388+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
31389@@ -41,7 +41,7 @@
31390 extern struct i2c_adapter *nforce2_smbus;
31391
31392 static struct i2c_adapter *s4985_adapter;
31393-static struct i2c_algorithm *s4985_algo;
31394+static i2c_algorithm_no_const *s4985_algo;
31395
31396 /* Wrapper access functions for multiplexed SMBus */
31397 static DEFINE_MUTEX(nforce2_lock);
31398diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
31399index d7a4833..7fae376 100644
31400--- a/drivers/i2c/i2c-mux.c
31401+++ b/drivers/i2c/i2c-mux.c
31402@@ -28,7 +28,7 @@
31403 /* multiplexer per channel data */
31404 struct i2c_mux_priv {
31405 struct i2c_adapter adap;
31406- struct i2c_algorithm algo;
31407+ i2c_algorithm_no_const algo;
31408
31409 struct i2c_adapter *parent;
31410 void *mux_dev; /* the mux chip/device */
31411diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
31412index 57d00ca..0145194 100644
31413--- a/drivers/ide/aec62xx.c
31414+++ b/drivers/ide/aec62xx.c
31415@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
31416 .cable_detect = atp86x_cable_detect,
31417 };
31418
31419-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
31420+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
31421 { /* 0: AEC6210 */
31422 .name = DRV_NAME,
31423 .init_chipset = init_chipset_aec62xx,
31424diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
31425index 2c8016a..911a27c 100644
31426--- a/drivers/ide/alim15x3.c
31427+++ b/drivers/ide/alim15x3.c
31428@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
31429 .dma_sff_read_status = ide_dma_sff_read_status,
31430 };
31431
31432-static const struct ide_port_info ali15x3_chipset __devinitdata = {
31433+static const struct ide_port_info ali15x3_chipset __devinitconst = {
31434 .name = DRV_NAME,
31435 .init_chipset = init_chipset_ali15x3,
31436 .init_hwif = init_hwif_ali15x3,
31437diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
31438index 3747b25..56fc995 100644
31439--- a/drivers/ide/amd74xx.c
31440+++ b/drivers/ide/amd74xx.c
31441@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
31442 .udma_mask = udma, \
31443 }
31444
31445-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
31446+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
31447 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
31448 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
31449 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
31450diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
31451index 15f0ead..cb43480 100644
31452--- a/drivers/ide/atiixp.c
31453+++ b/drivers/ide/atiixp.c
31454@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
31455 .cable_detect = atiixp_cable_detect,
31456 };
31457
31458-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
31459+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
31460 { /* 0: IXP200/300/400/700 */
31461 .name = DRV_NAME,
31462 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
31463diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
31464index 5f80312..d1fc438 100644
31465--- a/drivers/ide/cmd64x.c
31466+++ b/drivers/ide/cmd64x.c
31467@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
31468 .dma_sff_read_status = ide_dma_sff_read_status,
31469 };
31470
31471-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
31472+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
31473 { /* 0: CMD643 */
31474 .name = DRV_NAME,
31475 .init_chipset = init_chipset_cmd64x,
31476diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
31477index 2c1e5f7..1444762 100644
31478--- a/drivers/ide/cs5520.c
31479+++ b/drivers/ide/cs5520.c
31480@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
31481 .set_dma_mode = cs5520_set_dma_mode,
31482 };
31483
31484-static const struct ide_port_info cyrix_chipset __devinitdata = {
31485+static const struct ide_port_info cyrix_chipset __devinitconst = {
31486 .name = DRV_NAME,
31487 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
31488 .port_ops = &cs5520_port_ops,
31489diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
31490index 4dc4eb9..49b40ad 100644
31491--- a/drivers/ide/cs5530.c
31492+++ b/drivers/ide/cs5530.c
31493@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
31494 .udma_filter = cs5530_udma_filter,
31495 };
31496
31497-static const struct ide_port_info cs5530_chipset __devinitdata = {
31498+static const struct ide_port_info cs5530_chipset __devinitconst = {
31499 .name = DRV_NAME,
31500 .init_chipset = init_chipset_cs5530,
31501 .init_hwif = init_hwif_cs5530,
31502diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
31503index 5059faf..18d4c85 100644
31504--- a/drivers/ide/cs5535.c
31505+++ b/drivers/ide/cs5535.c
31506@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
31507 .cable_detect = cs5535_cable_detect,
31508 };
31509
31510-static const struct ide_port_info cs5535_chipset __devinitdata = {
31511+static const struct ide_port_info cs5535_chipset __devinitconst = {
31512 .name = DRV_NAME,
31513 .port_ops = &cs5535_port_ops,
31514 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
31515diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
31516index 847553f..3ffb49d 100644
31517--- a/drivers/ide/cy82c693.c
31518+++ b/drivers/ide/cy82c693.c
31519@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
31520 .set_dma_mode = cy82c693_set_dma_mode,
31521 };
31522
31523-static const struct ide_port_info cy82c693_chipset __devinitdata = {
31524+static const struct ide_port_info cy82c693_chipset __devinitconst = {
31525 .name = DRV_NAME,
31526 .init_iops = init_iops_cy82c693,
31527 .port_ops = &cy82c693_port_ops,
31528diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
31529index 58c51cd..4aec3b8 100644
31530--- a/drivers/ide/hpt366.c
31531+++ b/drivers/ide/hpt366.c
31532@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
31533 }
31534 };
31535
31536-static const struct hpt_info hpt36x __devinitdata = {
31537+static const struct hpt_info hpt36x __devinitconst = {
31538 .chip_name = "HPT36x",
31539 .chip_type = HPT36x,
31540 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
31541@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
31542 .timings = &hpt36x_timings
31543 };
31544
31545-static const struct hpt_info hpt370 __devinitdata = {
31546+static const struct hpt_info hpt370 __devinitconst = {
31547 .chip_name = "HPT370",
31548 .chip_type = HPT370,
31549 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31550@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
31551 .timings = &hpt37x_timings
31552 };
31553
31554-static const struct hpt_info hpt370a __devinitdata = {
31555+static const struct hpt_info hpt370a __devinitconst = {
31556 .chip_name = "HPT370A",
31557 .chip_type = HPT370A,
31558 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31559@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
31560 .timings = &hpt37x_timings
31561 };
31562
31563-static const struct hpt_info hpt374 __devinitdata = {
31564+static const struct hpt_info hpt374 __devinitconst = {
31565 .chip_name = "HPT374",
31566 .chip_type = HPT374,
31567 .udma_mask = ATA_UDMA5,
31568@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
31569 .timings = &hpt37x_timings
31570 };
31571
31572-static const struct hpt_info hpt372 __devinitdata = {
31573+static const struct hpt_info hpt372 __devinitconst = {
31574 .chip_name = "HPT372",
31575 .chip_type = HPT372,
31576 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31577@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
31578 .timings = &hpt37x_timings
31579 };
31580
31581-static const struct hpt_info hpt372a __devinitdata = {
31582+static const struct hpt_info hpt372a __devinitconst = {
31583 .chip_name = "HPT372A",
31584 .chip_type = HPT372A,
31585 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31586@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
31587 .timings = &hpt37x_timings
31588 };
31589
31590-static const struct hpt_info hpt302 __devinitdata = {
31591+static const struct hpt_info hpt302 __devinitconst = {
31592 .chip_name = "HPT302",
31593 .chip_type = HPT302,
31594 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31595@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
31596 .timings = &hpt37x_timings
31597 };
31598
31599-static const struct hpt_info hpt371 __devinitdata = {
31600+static const struct hpt_info hpt371 __devinitconst = {
31601 .chip_name = "HPT371",
31602 .chip_type = HPT371,
31603 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31604@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
31605 .timings = &hpt37x_timings
31606 };
31607
31608-static const struct hpt_info hpt372n __devinitdata = {
31609+static const struct hpt_info hpt372n __devinitconst = {
31610 .chip_name = "HPT372N",
31611 .chip_type = HPT372N,
31612 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31613@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
31614 .timings = &hpt37x_timings
31615 };
31616
31617-static const struct hpt_info hpt302n __devinitdata = {
31618+static const struct hpt_info hpt302n __devinitconst = {
31619 .chip_name = "HPT302N",
31620 .chip_type = HPT302N,
31621 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31622@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
31623 .timings = &hpt37x_timings
31624 };
31625
31626-static const struct hpt_info hpt371n __devinitdata = {
31627+static const struct hpt_info hpt371n __devinitconst = {
31628 .chip_name = "HPT371N",
31629 .chip_type = HPT371N,
31630 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31631@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
31632 .dma_sff_read_status = ide_dma_sff_read_status,
31633 };
31634
31635-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
31636+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
31637 { /* 0: HPT36x */
31638 .name = DRV_NAME,
31639 .init_chipset = init_chipset_hpt366,
31640diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
31641index 8126824..55a2798 100644
31642--- a/drivers/ide/ide-cd.c
31643+++ b/drivers/ide/ide-cd.c
31644@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
31645 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
31646 if ((unsigned long)buf & alignment
31647 || blk_rq_bytes(rq) & q->dma_pad_mask
31648- || object_is_on_stack(buf))
31649+ || object_starts_on_stack(buf))
31650 drive->dma = 0;
31651 }
31652 }
31653diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
31654index 7f56b73..dab5b67 100644
31655--- a/drivers/ide/ide-pci-generic.c
31656+++ b/drivers/ide/ide-pci-generic.c
31657@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
31658 .udma_mask = ATA_UDMA6, \
31659 }
31660
31661-static const struct ide_port_info generic_chipsets[] __devinitdata = {
31662+static const struct ide_port_info generic_chipsets[] __devinitconst = {
31663 /* 0: Unknown */
31664 DECLARE_GENERIC_PCI_DEV(0),
31665
31666diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
31667index 560e66d..d5dd180 100644
31668--- a/drivers/ide/it8172.c
31669+++ b/drivers/ide/it8172.c
31670@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
31671 .set_dma_mode = it8172_set_dma_mode,
31672 };
31673
31674-static const struct ide_port_info it8172_port_info __devinitdata = {
31675+static const struct ide_port_info it8172_port_info __devinitconst = {
31676 .name = DRV_NAME,
31677 .port_ops = &it8172_port_ops,
31678 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
31679diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
31680index 46816ba..1847aeb 100644
31681--- a/drivers/ide/it8213.c
31682+++ b/drivers/ide/it8213.c
31683@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
31684 .cable_detect = it8213_cable_detect,
31685 };
31686
31687-static const struct ide_port_info it8213_chipset __devinitdata = {
31688+static const struct ide_port_info it8213_chipset __devinitconst = {
31689 .name = DRV_NAME,
31690 .enablebits = { {0x41, 0x80, 0x80} },
31691 .port_ops = &it8213_port_ops,
31692diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
31693index 2e3169f..c5611db 100644
31694--- a/drivers/ide/it821x.c
31695+++ b/drivers/ide/it821x.c
31696@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
31697 .cable_detect = it821x_cable_detect,
31698 };
31699
31700-static const struct ide_port_info it821x_chipset __devinitdata = {
31701+static const struct ide_port_info it821x_chipset __devinitconst = {
31702 .name = DRV_NAME,
31703 .init_chipset = init_chipset_it821x,
31704 .init_hwif = init_hwif_it821x,
31705diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
31706index 74c2c4a..efddd7d 100644
31707--- a/drivers/ide/jmicron.c
31708+++ b/drivers/ide/jmicron.c
31709@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
31710 .cable_detect = jmicron_cable_detect,
31711 };
31712
31713-static const struct ide_port_info jmicron_chipset __devinitdata = {
31714+static const struct ide_port_info jmicron_chipset __devinitconst = {
31715 .name = DRV_NAME,
31716 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
31717 .port_ops = &jmicron_port_ops,
31718diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
31719index 95327a2..73f78d8 100644
31720--- a/drivers/ide/ns87415.c
31721+++ b/drivers/ide/ns87415.c
31722@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
31723 .dma_sff_read_status = superio_dma_sff_read_status,
31724 };
31725
31726-static const struct ide_port_info ns87415_chipset __devinitdata = {
31727+static const struct ide_port_info ns87415_chipset __devinitconst = {
31728 .name = DRV_NAME,
31729 .init_hwif = init_hwif_ns87415,
31730 .tp_ops = &ns87415_tp_ops,
31731diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
31732index 1a53a4c..39edc66 100644
31733--- a/drivers/ide/opti621.c
31734+++ b/drivers/ide/opti621.c
31735@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
31736 .set_pio_mode = opti621_set_pio_mode,
31737 };
31738
31739-static const struct ide_port_info opti621_chipset __devinitdata = {
31740+static const struct ide_port_info opti621_chipset __devinitconst = {
31741 .name = DRV_NAME,
31742 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
31743 .port_ops = &opti621_port_ops,
31744diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
31745index 9546fe2..2e5ceb6 100644
31746--- a/drivers/ide/pdc202xx_new.c
31747+++ b/drivers/ide/pdc202xx_new.c
31748@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
31749 .udma_mask = udma, \
31750 }
31751
31752-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
31753+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
31754 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
31755 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
31756 };
31757diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
31758index 3a35ec6..5634510 100644
31759--- a/drivers/ide/pdc202xx_old.c
31760+++ b/drivers/ide/pdc202xx_old.c
31761@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
31762 .max_sectors = sectors, \
31763 }
31764
31765-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
31766+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
31767 { /* 0: PDC20246 */
31768 .name = DRV_NAME,
31769 .init_chipset = init_chipset_pdc202xx,
31770diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
31771index 1892e81..fe0fd60 100644
31772--- a/drivers/ide/piix.c
31773+++ b/drivers/ide/piix.c
31774@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
31775 .udma_mask = udma, \
31776 }
31777
31778-static const struct ide_port_info piix_pci_info[] __devinitdata = {
31779+static const struct ide_port_info piix_pci_info[] __devinitconst = {
31780 /* 0: MPIIX */
31781 { /*
31782 * MPIIX actually has only a single IDE channel mapped to
31783diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
31784index a6414a8..c04173e 100644
31785--- a/drivers/ide/rz1000.c
31786+++ b/drivers/ide/rz1000.c
31787@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
31788 }
31789 }
31790
31791-static const struct ide_port_info rz1000_chipset __devinitdata = {
31792+static const struct ide_port_info rz1000_chipset __devinitconst = {
31793 .name = DRV_NAME,
31794 .host_flags = IDE_HFLAG_NO_DMA,
31795 };
31796diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
31797index 356b9b5..d4758eb 100644
31798--- a/drivers/ide/sc1200.c
31799+++ b/drivers/ide/sc1200.c
31800@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
31801 .dma_sff_read_status = ide_dma_sff_read_status,
31802 };
31803
31804-static const struct ide_port_info sc1200_chipset __devinitdata = {
31805+static const struct ide_port_info sc1200_chipset __devinitconst = {
31806 .name = DRV_NAME,
31807 .port_ops = &sc1200_port_ops,
31808 .dma_ops = &sc1200_dma_ops,
31809diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
31810index b7f5b0c..9701038 100644
31811--- a/drivers/ide/scc_pata.c
31812+++ b/drivers/ide/scc_pata.c
31813@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
31814 .dma_sff_read_status = scc_dma_sff_read_status,
31815 };
31816
31817-static const struct ide_port_info scc_chipset __devinitdata = {
31818+static const struct ide_port_info scc_chipset __devinitconst = {
31819 .name = "sccIDE",
31820 .init_iops = init_iops_scc,
31821 .init_dma = scc_init_dma,
31822diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
31823index 35fb8da..24d72ef 100644
31824--- a/drivers/ide/serverworks.c
31825+++ b/drivers/ide/serverworks.c
31826@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
31827 .cable_detect = svwks_cable_detect,
31828 };
31829
31830-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
31831+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
31832 { /* 0: OSB4 */
31833 .name = DRV_NAME,
31834 .init_chipset = init_chipset_svwks,
31835diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
31836index ddeda44..46f7e30 100644
31837--- a/drivers/ide/siimage.c
31838+++ b/drivers/ide/siimage.c
31839@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
31840 .udma_mask = ATA_UDMA6, \
31841 }
31842
31843-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
31844+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
31845 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
31846 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
31847 };
31848diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
31849index 4a00225..09e61b4 100644
31850--- a/drivers/ide/sis5513.c
31851+++ b/drivers/ide/sis5513.c
31852@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
31853 .cable_detect = sis_cable_detect,
31854 };
31855
31856-static const struct ide_port_info sis5513_chipset __devinitdata = {
31857+static const struct ide_port_info sis5513_chipset __devinitconst = {
31858 .name = DRV_NAME,
31859 .init_chipset = init_chipset_sis5513,
31860 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
31861diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
31862index f21dc2a..d051cd2 100644
31863--- a/drivers/ide/sl82c105.c
31864+++ b/drivers/ide/sl82c105.c
31865@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
31866 .dma_sff_read_status = ide_dma_sff_read_status,
31867 };
31868
31869-static const struct ide_port_info sl82c105_chipset __devinitdata = {
31870+static const struct ide_port_info sl82c105_chipset __devinitconst = {
31871 .name = DRV_NAME,
31872 .init_chipset = init_chipset_sl82c105,
31873 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
31874diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
31875index 864ffe0..863a5e9 100644
31876--- a/drivers/ide/slc90e66.c
31877+++ b/drivers/ide/slc90e66.c
31878@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
31879 .cable_detect = slc90e66_cable_detect,
31880 };
31881
31882-static const struct ide_port_info slc90e66_chipset __devinitdata = {
31883+static const struct ide_port_info slc90e66_chipset __devinitconst = {
31884 .name = DRV_NAME,
31885 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
31886 .port_ops = &slc90e66_port_ops,
31887diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
31888index 4799d5c..1794678 100644
31889--- a/drivers/ide/tc86c001.c
31890+++ b/drivers/ide/tc86c001.c
31891@@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
31892 .dma_sff_read_status = ide_dma_sff_read_status,
31893 };
31894
31895-static const struct ide_port_info tc86c001_chipset __devinitdata = {
31896+static const struct ide_port_info tc86c001_chipset __devinitconst = {
31897 .name = DRV_NAME,
31898 .init_hwif = init_hwif_tc86c001,
31899 .port_ops = &tc86c001_port_ops,
31900diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
31901index 281c914..55ce1b8 100644
31902--- a/drivers/ide/triflex.c
31903+++ b/drivers/ide/triflex.c
31904@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
31905 .set_dma_mode = triflex_set_mode,
31906 };
31907
31908-static const struct ide_port_info triflex_device __devinitdata = {
31909+static const struct ide_port_info triflex_device __devinitconst = {
31910 .name = DRV_NAME,
31911 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
31912 .port_ops = &triflex_port_ops,
31913diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
31914index 4b42ca0..e494a98 100644
31915--- a/drivers/ide/trm290.c
31916+++ b/drivers/ide/trm290.c
31917@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
31918 .dma_check = trm290_dma_check,
31919 };
31920
31921-static const struct ide_port_info trm290_chipset __devinitdata = {
31922+static const struct ide_port_info trm290_chipset __devinitconst = {
31923 .name = DRV_NAME,
31924 .init_hwif = init_hwif_trm290,
31925 .tp_ops = &trm290_tp_ops,
31926diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
31927index f46f49c..eb77678 100644
31928--- a/drivers/ide/via82cxxx.c
31929+++ b/drivers/ide/via82cxxx.c
31930@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
31931 .cable_detect = via82cxxx_cable_detect,
31932 };
31933
31934-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
31935+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
31936 .name = DRV_NAME,
31937 .init_chipset = init_chipset_via82cxxx,
31938 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
31939diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
31940index 73d4531..c90cd2d 100644
31941--- a/drivers/ieee802154/fakehard.c
31942+++ b/drivers/ieee802154/fakehard.c
31943@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
31944 phy->transmit_power = 0xbf;
31945
31946 dev->netdev_ops = &fake_ops;
31947- dev->ml_priv = &fake_mlme;
31948+ dev->ml_priv = (void *)&fake_mlme;
31949
31950 priv = netdev_priv(dev);
31951 priv->phy = phy;
31952diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
31953index c889aae..6cf5aa7 100644
31954--- a/drivers/infiniband/core/cm.c
31955+++ b/drivers/infiniband/core/cm.c
31956@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
31957
31958 struct cm_counter_group {
31959 struct kobject obj;
31960- atomic_long_t counter[CM_ATTR_COUNT];
31961+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
31962 };
31963
31964 struct cm_counter_attribute {
31965@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
31966 struct ib_mad_send_buf *msg = NULL;
31967 int ret;
31968
31969- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31970+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31971 counter[CM_REQ_COUNTER]);
31972
31973 /* Quick state check to discard duplicate REQs. */
31974@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
31975 if (!cm_id_priv)
31976 return;
31977
31978- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31979+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31980 counter[CM_REP_COUNTER]);
31981 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
31982 if (ret)
31983@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
31984 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
31985 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
31986 spin_unlock_irq(&cm_id_priv->lock);
31987- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31988+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31989 counter[CM_RTU_COUNTER]);
31990 goto out;
31991 }
31992@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
31993 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
31994 dreq_msg->local_comm_id);
31995 if (!cm_id_priv) {
31996- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31997+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31998 counter[CM_DREQ_COUNTER]);
31999 cm_issue_drep(work->port, work->mad_recv_wc);
32000 return -EINVAL;
32001@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
32002 case IB_CM_MRA_REP_RCVD:
32003 break;
32004 case IB_CM_TIMEWAIT:
32005- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32006+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32007 counter[CM_DREQ_COUNTER]);
32008 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32009 goto unlock;
32010@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
32011 cm_free_msg(msg);
32012 goto deref;
32013 case IB_CM_DREQ_RCVD:
32014- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32015+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32016 counter[CM_DREQ_COUNTER]);
32017 goto unlock;
32018 default:
32019@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
32020 ib_modify_mad(cm_id_priv->av.port->mad_agent,
32021 cm_id_priv->msg, timeout)) {
32022 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
32023- atomic_long_inc(&work->port->
32024+ atomic_long_inc_unchecked(&work->port->
32025 counter_group[CM_RECV_DUPLICATES].
32026 counter[CM_MRA_COUNTER]);
32027 goto out;
32028@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
32029 break;
32030 case IB_CM_MRA_REQ_RCVD:
32031 case IB_CM_MRA_REP_RCVD:
32032- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32033+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32034 counter[CM_MRA_COUNTER]);
32035 /* fall through */
32036 default:
32037@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
32038 case IB_CM_LAP_IDLE:
32039 break;
32040 case IB_CM_MRA_LAP_SENT:
32041- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32042+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32043 counter[CM_LAP_COUNTER]);
32044 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32045 goto unlock;
32046@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
32047 cm_free_msg(msg);
32048 goto deref;
32049 case IB_CM_LAP_RCVD:
32050- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32051+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32052 counter[CM_LAP_COUNTER]);
32053 goto unlock;
32054 default:
32055@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
32056 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
32057 if (cur_cm_id_priv) {
32058 spin_unlock_irq(&cm.lock);
32059- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32060+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32061 counter[CM_SIDR_REQ_COUNTER]);
32062 goto out; /* Duplicate message. */
32063 }
32064@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
32065 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
32066 msg->retries = 1;
32067
32068- atomic_long_add(1 + msg->retries,
32069+ atomic_long_add_unchecked(1 + msg->retries,
32070 &port->counter_group[CM_XMIT].counter[attr_index]);
32071 if (msg->retries)
32072- atomic_long_add(msg->retries,
32073+ atomic_long_add_unchecked(msg->retries,
32074 &port->counter_group[CM_XMIT_RETRIES].
32075 counter[attr_index]);
32076
32077@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
32078 }
32079
32080 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
32081- atomic_long_inc(&port->counter_group[CM_RECV].
32082+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
32083 counter[attr_id - CM_ATTR_ID_OFFSET]);
32084
32085 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
32086@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
32087 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
32088
32089 return sprintf(buf, "%ld\n",
32090- atomic_long_read(&group->counter[cm_attr->index]));
32091+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
32092 }
32093
32094 static const struct sysfs_ops cm_counter_ops = {
32095diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
32096index 176c8f9..2627b62 100644
32097--- a/drivers/infiniband/core/fmr_pool.c
32098+++ b/drivers/infiniband/core/fmr_pool.c
32099@@ -98,8 +98,8 @@ struct ib_fmr_pool {
32100
32101 struct task_struct *thread;
32102
32103- atomic_t req_ser;
32104- atomic_t flush_ser;
32105+ atomic_unchecked_t req_ser;
32106+ atomic_unchecked_t flush_ser;
32107
32108 wait_queue_head_t force_wait;
32109 };
32110@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32111 struct ib_fmr_pool *pool = pool_ptr;
32112
32113 do {
32114- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
32115+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
32116 ib_fmr_batch_release(pool);
32117
32118- atomic_inc(&pool->flush_ser);
32119+ atomic_inc_unchecked(&pool->flush_ser);
32120 wake_up_interruptible(&pool->force_wait);
32121
32122 if (pool->flush_function)
32123@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32124 }
32125
32126 set_current_state(TASK_INTERRUPTIBLE);
32127- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
32128+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
32129 !kthread_should_stop())
32130 schedule();
32131 __set_current_state(TASK_RUNNING);
32132@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
32133 pool->dirty_watermark = params->dirty_watermark;
32134 pool->dirty_len = 0;
32135 spin_lock_init(&pool->pool_lock);
32136- atomic_set(&pool->req_ser, 0);
32137- atomic_set(&pool->flush_ser, 0);
32138+ atomic_set_unchecked(&pool->req_ser, 0);
32139+ atomic_set_unchecked(&pool->flush_ser, 0);
32140 init_waitqueue_head(&pool->force_wait);
32141
32142 pool->thread = kthread_run(ib_fmr_cleanup_thread,
32143@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
32144 }
32145 spin_unlock_irq(&pool->pool_lock);
32146
32147- serial = atomic_inc_return(&pool->req_ser);
32148+ serial = atomic_inc_return_unchecked(&pool->req_ser);
32149 wake_up_process(pool->thread);
32150
32151 if (wait_event_interruptible(pool->force_wait,
32152- atomic_read(&pool->flush_ser) - serial >= 0))
32153+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
32154 return -EINTR;
32155
32156 return 0;
32157@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
32158 } else {
32159 list_add_tail(&fmr->list, &pool->dirty_list);
32160 if (++pool->dirty_len >= pool->dirty_watermark) {
32161- atomic_inc(&pool->req_ser);
32162+ atomic_inc_unchecked(&pool->req_ser);
32163 wake_up_process(pool->thread);
32164 }
32165 }
32166diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
32167index 40c8353..946b0e4 100644
32168--- a/drivers/infiniband/hw/cxgb4/mem.c
32169+++ b/drivers/infiniband/hw/cxgb4/mem.c
32170@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32171 int err;
32172 struct fw_ri_tpte tpt;
32173 u32 stag_idx;
32174- static atomic_t key;
32175+ static atomic_unchecked_t key;
32176
32177 if (c4iw_fatal_error(rdev))
32178 return -EIO;
32179@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32180 &rdev->resource.tpt_fifo_lock);
32181 if (!stag_idx)
32182 return -ENOMEM;
32183- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
32184+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
32185 }
32186 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
32187 __func__, stag_state, type, pdid, stag_idx);
32188diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
32189index 79b3dbc..96e5fcc 100644
32190--- a/drivers/infiniband/hw/ipath/ipath_rc.c
32191+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
32192@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32193 struct ib_atomic_eth *ateth;
32194 struct ipath_ack_entry *e;
32195 u64 vaddr;
32196- atomic64_t *maddr;
32197+ atomic64_unchecked_t *maddr;
32198 u64 sdata;
32199 u32 rkey;
32200 u8 next;
32201@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32202 IB_ACCESS_REMOTE_ATOMIC)))
32203 goto nack_acc_unlck;
32204 /* Perform atomic OP and save result. */
32205- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32206+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32207 sdata = be64_to_cpu(ateth->swap_data);
32208 e = &qp->s_ack_queue[qp->r_head_ack_queue];
32209 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
32210- (u64) atomic64_add_return(sdata, maddr) - sdata :
32211+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32212 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32213 be64_to_cpu(ateth->compare_data),
32214 sdata);
32215diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
32216index 1f95bba..9530f87 100644
32217--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
32218+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
32219@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
32220 unsigned long flags;
32221 struct ib_wc wc;
32222 u64 sdata;
32223- atomic64_t *maddr;
32224+ atomic64_unchecked_t *maddr;
32225 enum ib_wc_status send_status;
32226
32227 /*
32228@@ -382,11 +382,11 @@ again:
32229 IB_ACCESS_REMOTE_ATOMIC)))
32230 goto acc_err;
32231 /* Perform atomic OP and save result. */
32232- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32233+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32234 sdata = wqe->wr.wr.atomic.compare_add;
32235 *(u64 *) sqp->s_sge.sge.vaddr =
32236 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
32237- (u64) atomic64_add_return(sdata, maddr) - sdata :
32238+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32239 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32240 sdata, wqe->wr.wr.atomic.swap);
32241 goto send_comp;
32242diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
32243index 7140199..da60063 100644
32244--- a/drivers/infiniband/hw/nes/nes.c
32245+++ b/drivers/infiniband/hw/nes/nes.c
32246@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
32247 LIST_HEAD(nes_adapter_list);
32248 static LIST_HEAD(nes_dev_list);
32249
32250-atomic_t qps_destroyed;
32251+atomic_unchecked_t qps_destroyed;
32252
32253 static unsigned int ee_flsh_adapter;
32254 static unsigned int sysfs_nonidx_addr;
32255@@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
32256 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
32257 struct nes_adapter *nesadapter = nesdev->nesadapter;
32258
32259- atomic_inc(&qps_destroyed);
32260+ atomic_inc_unchecked(&qps_destroyed);
32261
32262 /* Free the control structures */
32263
32264diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
32265index c438e46..ca30356 100644
32266--- a/drivers/infiniband/hw/nes/nes.h
32267+++ b/drivers/infiniband/hw/nes/nes.h
32268@@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
32269 extern unsigned int wqm_quanta;
32270 extern struct list_head nes_adapter_list;
32271
32272-extern atomic_t cm_connects;
32273-extern atomic_t cm_accepts;
32274-extern atomic_t cm_disconnects;
32275-extern atomic_t cm_closes;
32276-extern atomic_t cm_connecteds;
32277-extern atomic_t cm_connect_reqs;
32278-extern atomic_t cm_rejects;
32279-extern atomic_t mod_qp_timouts;
32280-extern atomic_t qps_created;
32281-extern atomic_t qps_destroyed;
32282-extern atomic_t sw_qps_destroyed;
32283+extern atomic_unchecked_t cm_connects;
32284+extern atomic_unchecked_t cm_accepts;
32285+extern atomic_unchecked_t cm_disconnects;
32286+extern atomic_unchecked_t cm_closes;
32287+extern atomic_unchecked_t cm_connecteds;
32288+extern atomic_unchecked_t cm_connect_reqs;
32289+extern atomic_unchecked_t cm_rejects;
32290+extern atomic_unchecked_t mod_qp_timouts;
32291+extern atomic_unchecked_t qps_created;
32292+extern atomic_unchecked_t qps_destroyed;
32293+extern atomic_unchecked_t sw_qps_destroyed;
32294 extern u32 mh_detected;
32295 extern u32 mh_pauses_sent;
32296 extern u32 cm_packets_sent;
32297@@ -197,16 +197,16 @@ extern u32 cm_packets_created;
32298 extern u32 cm_packets_received;
32299 extern u32 cm_packets_dropped;
32300 extern u32 cm_packets_retrans;
32301-extern atomic_t cm_listens_created;
32302-extern atomic_t cm_listens_destroyed;
32303+extern atomic_unchecked_t cm_listens_created;
32304+extern atomic_unchecked_t cm_listens_destroyed;
32305 extern u32 cm_backlog_drops;
32306-extern atomic_t cm_loopbacks;
32307-extern atomic_t cm_nodes_created;
32308-extern atomic_t cm_nodes_destroyed;
32309-extern atomic_t cm_accel_dropped_pkts;
32310-extern atomic_t cm_resets_recvd;
32311-extern atomic_t pau_qps_created;
32312-extern atomic_t pau_qps_destroyed;
32313+extern atomic_unchecked_t cm_loopbacks;
32314+extern atomic_unchecked_t cm_nodes_created;
32315+extern atomic_unchecked_t cm_nodes_destroyed;
32316+extern atomic_unchecked_t cm_accel_dropped_pkts;
32317+extern atomic_unchecked_t cm_resets_recvd;
32318+extern atomic_unchecked_t pau_qps_created;
32319+extern atomic_unchecked_t pau_qps_destroyed;
32320
32321 extern u32 int_mod_timer_init;
32322 extern u32 int_mod_cq_depth_256;
32323diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
32324index 71edfbb..15b62ae 100644
32325--- a/drivers/infiniband/hw/nes/nes_cm.c
32326+++ b/drivers/infiniband/hw/nes/nes_cm.c
32327@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
32328 u32 cm_packets_retrans;
32329 u32 cm_packets_created;
32330 u32 cm_packets_received;
32331-atomic_t cm_listens_created;
32332-atomic_t cm_listens_destroyed;
32333+atomic_unchecked_t cm_listens_created;
32334+atomic_unchecked_t cm_listens_destroyed;
32335 u32 cm_backlog_drops;
32336-atomic_t cm_loopbacks;
32337-atomic_t cm_nodes_created;
32338-atomic_t cm_nodes_destroyed;
32339-atomic_t cm_accel_dropped_pkts;
32340-atomic_t cm_resets_recvd;
32341+atomic_unchecked_t cm_loopbacks;
32342+atomic_unchecked_t cm_nodes_created;
32343+atomic_unchecked_t cm_nodes_destroyed;
32344+atomic_unchecked_t cm_accel_dropped_pkts;
32345+atomic_unchecked_t cm_resets_recvd;
32346
32347 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
32348 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
32349@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
32350
32351 static struct nes_cm_core *g_cm_core;
32352
32353-atomic_t cm_connects;
32354-atomic_t cm_accepts;
32355-atomic_t cm_disconnects;
32356-atomic_t cm_closes;
32357-atomic_t cm_connecteds;
32358-atomic_t cm_connect_reqs;
32359-atomic_t cm_rejects;
32360+atomic_unchecked_t cm_connects;
32361+atomic_unchecked_t cm_accepts;
32362+atomic_unchecked_t cm_disconnects;
32363+atomic_unchecked_t cm_closes;
32364+atomic_unchecked_t cm_connecteds;
32365+atomic_unchecked_t cm_connect_reqs;
32366+atomic_unchecked_t cm_rejects;
32367
32368 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
32369 {
32370@@ -1279,7 +1279,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
32371 kfree(listener);
32372 listener = NULL;
32373 ret = 0;
32374- atomic_inc(&cm_listens_destroyed);
32375+ atomic_inc_unchecked(&cm_listens_destroyed);
32376 } else {
32377 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
32378 }
32379@@ -1482,7 +1482,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
32380 cm_node->rem_mac);
32381
32382 add_hte_node(cm_core, cm_node);
32383- atomic_inc(&cm_nodes_created);
32384+ atomic_inc_unchecked(&cm_nodes_created);
32385
32386 return cm_node;
32387 }
32388@@ -1540,7 +1540,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
32389 }
32390
32391 atomic_dec(&cm_core->node_cnt);
32392- atomic_inc(&cm_nodes_destroyed);
32393+ atomic_inc_unchecked(&cm_nodes_destroyed);
32394 nesqp = cm_node->nesqp;
32395 if (nesqp) {
32396 nesqp->cm_node = NULL;
32397@@ -1604,7 +1604,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
32398
32399 static void drop_packet(struct sk_buff *skb)
32400 {
32401- atomic_inc(&cm_accel_dropped_pkts);
32402+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
32403 dev_kfree_skb_any(skb);
32404 }
32405
32406@@ -1667,7 +1667,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
32407 {
32408
32409 int reset = 0; /* whether to send reset in case of err.. */
32410- atomic_inc(&cm_resets_recvd);
32411+ atomic_inc_unchecked(&cm_resets_recvd);
32412 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
32413 " refcnt=%d\n", cm_node, cm_node->state,
32414 atomic_read(&cm_node->ref_count));
32415@@ -2308,7 +2308,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
32416 rem_ref_cm_node(cm_node->cm_core, cm_node);
32417 return NULL;
32418 }
32419- atomic_inc(&cm_loopbacks);
32420+ atomic_inc_unchecked(&cm_loopbacks);
32421 loopbackremotenode->loopbackpartner = cm_node;
32422 loopbackremotenode->tcp_cntxt.rcv_wscale =
32423 NES_CM_DEFAULT_RCV_WND_SCALE;
32424@@ -2583,7 +2583,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
32425 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
32426 else {
32427 rem_ref_cm_node(cm_core, cm_node);
32428- atomic_inc(&cm_accel_dropped_pkts);
32429+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
32430 dev_kfree_skb_any(skb);
32431 }
32432 break;
32433@@ -2890,7 +2890,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32434
32435 if ((cm_id) && (cm_id->event_handler)) {
32436 if (issue_disconn) {
32437- atomic_inc(&cm_disconnects);
32438+ atomic_inc_unchecked(&cm_disconnects);
32439 cm_event.event = IW_CM_EVENT_DISCONNECT;
32440 cm_event.status = disconn_status;
32441 cm_event.local_addr = cm_id->local_addr;
32442@@ -2912,7 +2912,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32443 }
32444
32445 if (issue_close) {
32446- atomic_inc(&cm_closes);
32447+ atomic_inc_unchecked(&cm_closes);
32448 nes_disconnect(nesqp, 1);
32449
32450 cm_id->provider_data = nesqp;
32451@@ -3048,7 +3048,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32452
32453 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
32454 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
32455- atomic_inc(&cm_accepts);
32456+ atomic_inc_unchecked(&cm_accepts);
32457
32458 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
32459 netdev_refcnt_read(nesvnic->netdev));
32460@@ -3250,7 +3250,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
32461 struct nes_cm_core *cm_core;
32462 u8 *start_buff;
32463
32464- atomic_inc(&cm_rejects);
32465+ atomic_inc_unchecked(&cm_rejects);
32466 cm_node = (struct nes_cm_node *)cm_id->provider_data;
32467 loopback = cm_node->loopbackpartner;
32468 cm_core = cm_node->cm_core;
32469@@ -3310,7 +3310,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32470 ntohl(cm_id->local_addr.sin_addr.s_addr),
32471 ntohs(cm_id->local_addr.sin_port));
32472
32473- atomic_inc(&cm_connects);
32474+ atomic_inc_unchecked(&cm_connects);
32475 nesqp->active_conn = 1;
32476
32477 /* cache the cm_id in the qp */
32478@@ -3416,7 +3416,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
32479 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
32480 return err;
32481 }
32482- atomic_inc(&cm_listens_created);
32483+ atomic_inc_unchecked(&cm_listens_created);
32484 }
32485
32486 cm_id->add_ref(cm_id);
32487@@ -3517,7 +3517,7 @@ static void cm_event_connected(struct nes_cm_event *event)
32488
32489 if (nesqp->destroyed)
32490 return;
32491- atomic_inc(&cm_connecteds);
32492+ atomic_inc_unchecked(&cm_connecteds);
32493 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
32494 " local port 0x%04X. jiffies = %lu.\n",
32495 nesqp->hwqp.qp_id,
32496@@ -3704,7 +3704,7 @@ static void cm_event_reset(struct nes_cm_event *event)
32497
32498 cm_id->add_ref(cm_id);
32499 ret = cm_id->event_handler(cm_id, &cm_event);
32500- atomic_inc(&cm_closes);
32501+ atomic_inc_unchecked(&cm_closes);
32502 cm_event.event = IW_CM_EVENT_CLOSE;
32503 cm_event.status = 0;
32504 cm_event.provider_data = cm_id->provider_data;
32505@@ -3740,7 +3740,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
32506 return;
32507 cm_id = cm_node->cm_id;
32508
32509- atomic_inc(&cm_connect_reqs);
32510+ atomic_inc_unchecked(&cm_connect_reqs);
32511 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32512 cm_node, cm_id, jiffies);
32513
32514@@ -3780,7 +3780,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
32515 return;
32516 cm_id = cm_node->cm_id;
32517
32518- atomic_inc(&cm_connect_reqs);
32519+ atomic_inc_unchecked(&cm_connect_reqs);
32520 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32521 cm_node, cm_id, jiffies);
32522
32523diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
32524index 3ba7be3..c81f6ff 100644
32525--- a/drivers/infiniband/hw/nes/nes_mgt.c
32526+++ b/drivers/infiniband/hw/nes/nes_mgt.c
32527@@ -40,8 +40,8 @@
32528 #include "nes.h"
32529 #include "nes_mgt.h"
32530
32531-atomic_t pau_qps_created;
32532-atomic_t pau_qps_destroyed;
32533+atomic_unchecked_t pau_qps_created;
32534+atomic_unchecked_t pau_qps_destroyed;
32535
32536 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
32537 {
32538@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
32539 {
32540 struct sk_buff *skb;
32541 unsigned long flags;
32542- atomic_inc(&pau_qps_destroyed);
32543+ atomic_inc_unchecked(&pau_qps_destroyed);
32544
32545 /* Free packets that have not yet been forwarded */
32546 /* Lock is acquired by skb_dequeue when removing the skb */
32547@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
32548 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
32549 skb_queue_head_init(&nesqp->pau_list);
32550 spin_lock_init(&nesqp->pau_lock);
32551- atomic_inc(&pau_qps_created);
32552+ atomic_inc_unchecked(&pau_qps_created);
32553 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
32554 }
32555
32556diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
32557index f3a3ecf..57d311d 100644
32558--- a/drivers/infiniband/hw/nes/nes_nic.c
32559+++ b/drivers/infiniband/hw/nes/nes_nic.c
32560@@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
32561 target_stat_values[++index] = mh_detected;
32562 target_stat_values[++index] = mh_pauses_sent;
32563 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
32564- target_stat_values[++index] = atomic_read(&cm_connects);
32565- target_stat_values[++index] = atomic_read(&cm_accepts);
32566- target_stat_values[++index] = atomic_read(&cm_disconnects);
32567- target_stat_values[++index] = atomic_read(&cm_connecteds);
32568- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
32569- target_stat_values[++index] = atomic_read(&cm_rejects);
32570- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
32571- target_stat_values[++index] = atomic_read(&qps_created);
32572- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
32573- target_stat_values[++index] = atomic_read(&qps_destroyed);
32574- target_stat_values[++index] = atomic_read(&cm_closes);
32575+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
32576+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
32577+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
32578+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
32579+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
32580+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
32581+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
32582+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
32583+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
32584+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
32585+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
32586 target_stat_values[++index] = cm_packets_sent;
32587 target_stat_values[++index] = cm_packets_bounced;
32588 target_stat_values[++index] = cm_packets_created;
32589 target_stat_values[++index] = cm_packets_received;
32590 target_stat_values[++index] = cm_packets_dropped;
32591 target_stat_values[++index] = cm_packets_retrans;
32592- target_stat_values[++index] = atomic_read(&cm_listens_created);
32593- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
32594+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
32595+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
32596 target_stat_values[++index] = cm_backlog_drops;
32597- target_stat_values[++index] = atomic_read(&cm_loopbacks);
32598- target_stat_values[++index] = atomic_read(&cm_nodes_created);
32599- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
32600- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
32601- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
32602+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
32603+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
32604+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
32605+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
32606+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
32607 target_stat_values[++index] = nesadapter->free_4kpbl;
32608 target_stat_values[++index] = nesadapter->free_256pbl;
32609 target_stat_values[++index] = int_mod_timer_init;
32610 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
32611 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
32612 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
32613- target_stat_values[++index] = atomic_read(&pau_qps_created);
32614- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
32615+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
32616+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
32617 }
32618
32619 /**
32620diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
32621index 8b8812d..a5e1133 100644
32622--- a/drivers/infiniband/hw/nes/nes_verbs.c
32623+++ b/drivers/infiniband/hw/nes/nes_verbs.c
32624@@ -46,9 +46,9 @@
32625
32626 #include <rdma/ib_umem.h>
32627
32628-atomic_t mod_qp_timouts;
32629-atomic_t qps_created;
32630-atomic_t sw_qps_destroyed;
32631+atomic_unchecked_t mod_qp_timouts;
32632+atomic_unchecked_t qps_created;
32633+atomic_unchecked_t sw_qps_destroyed;
32634
32635 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
32636
32637@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
32638 if (init_attr->create_flags)
32639 return ERR_PTR(-EINVAL);
32640
32641- atomic_inc(&qps_created);
32642+ atomic_inc_unchecked(&qps_created);
32643 switch (init_attr->qp_type) {
32644 case IB_QPT_RC:
32645 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
32646@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
32647 struct iw_cm_event cm_event;
32648 int ret = 0;
32649
32650- atomic_inc(&sw_qps_destroyed);
32651+ atomic_inc_unchecked(&sw_qps_destroyed);
32652 nesqp->destroyed = 1;
32653
32654 /* Blow away the connection if it exists. */
32655diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
32656index 6b811e3..f8acf88 100644
32657--- a/drivers/infiniband/hw/qib/qib.h
32658+++ b/drivers/infiniband/hw/qib/qib.h
32659@@ -51,6 +51,7 @@
32660 #include <linux/completion.h>
32661 #include <linux/kref.h>
32662 #include <linux/sched.h>
32663+#include <linux/slab.h>
32664
32665 #include "qib_common.h"
32666 #include "qib_verbs.h"
32667diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
32668index da739d9..da1c7f4 100644
32669--- a/drivers/input/gameport/gameport.c
32670+++ b/drivers/input/gameport/gameport.c
32671@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
32672 */
32673 static void gameport_init_port(struct gameport *gameport)
32674 {
32675- static atomic_t gameport_no = ATOMIC_INIT(0);
32676+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
32677
32678 __module_get(THIS_MODULE);
32679
32680 mutex_init(&gameport->drv_mutex);
32681 device_initialize(&gameport->dev);
32682 dev_set_name(&gameport->dev, "gameport%lu",
32683- (unsigned long)atomic_inc_return(&gameport_no) - 1);
32684+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
32685 gameport->dev.bus = &gameport_bus;
32686 gameport->dev.release = gameport_release_port;
32687 if (gameport->parent)
32688diff --git a/drivers/input/input.c b/drivers/input/input.c
32689index 8921c61..f5cd63d 100644
32690--- a/drivers/input/input.c
32691+++ b/drivers/input/input.c
32692@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
32693 */
32694 int input_register_device(struct input_dev *dev)
32695 {
32696- static atomic_t input_no = ATOMIC_INIT(0);
32697+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
32698 struct input_handler *handler;
32699 const char *path;
32700 int error;
32701@@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
32702 dev->setkeycode = input_default_setkeycode;
32703
32704 dev_set_name(&dev->dev, "input%ld",
32705- (unsigned long) atomic_inc_return(&input_no) - 1);
32706+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
32707
32708 error = device_add(&dev->dev);
32709 if (error)
32710diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
32711index b8d8611..7a4a04b 100644
32712--- a/drivers/input/joystick/sidewinder.c
32713+++ b/drivers/input/joystick/sidewinder.c
32714@@ -30,6 +30,7 @@
32715 #include <linux/kernel.h>
32716 #include <linux/module.h>
32717 #include <linux/slab.h>
32718+#include <linux/sched.h>
32719 #include <linux/init.h>
32720 #include <linux/input.h>
32721 #include <linux/gameport.h>
32722diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
32723index fd7a0d5..a4af10c 100644
32724--- a/drivers/input/joystick/xpad.c
32725+++ b/drivers/input/joystick/xpad.c
32726@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
32727
32728 static int xpad_led_probe(struct usb_xpad *xpad)
32729 {
32730- static atomic_t led_seq = ATOMIC_INIT(0);
32731+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
32732 long led_no;
32733 struct xpad_led *led;
32734 struct led_classdev *led_cdev;
32735@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
32736 if (!led)
32737 return -ENOMEM;
32738
32739- led_no = (long)atomic_inc_return(&led_seq) - 1;
32740+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
32741
32742 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
32743 led->xpad = xpad;
32744diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
32745index 0110b5a..d3ad144 100644
32746--- a/drivers/input/mousedev.c
32747+++ b/drivers/input/mousedev.c
32748@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
32749
32750 spin_unlock_irq(&client->packet_lock);
32751
32752- if (copy_to_user(buffer, data, count))
32753+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
32754 return -EFAULT;
32755
32756 return count;
32757diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
32758index d0f7533..fb8215b 100644
32759--- a/drivers/input/serio/serio.c
32760+++ b/drivers/input/serio/serio.c
32761@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
32762 */
32763 static void serio_init_port(struct serio *serio)
32764 {
32765- static atomic_t serio_no = ATOMIC_INIT(0);
32766+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
32767
32768 __module_get(THIS_MODULE);
32769
32770@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
32771 mutex_init(&serio->drv_mutex);
32772 device_initialize(&serio->dev);
32773 dev_set_name(&serio->dev, "serio%ld",
32774- (long)atomic_inc_return(&serio_no) - 1);
32775+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
32776 serio->dev.bus = &serio_bus;
32777 serio->dev.release = serio_release_port;
32778 serio->dev.groups = serio_device_attr_groups;
32779diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
32780index b902794..fc7b85b 100644
32781--- a/drivers/isdn/capi/capi.c
32782+++ b/drivers/isdn/capi/capi.c
32783@@ -83,8 +83,8 @@ struct capiminor {
32784
32785 struct capi20_appl *ap;
32786 u32 ncci;
32787- atomic_t datahandle;
32788- atomic_t msgid;
32789+ atomic_unchecked_t datahandle;
32790+ atomic_unchecked_t msgid;
32791
32792 struct tty_port port;
32793 int ttyinstop;
32794@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
32795 capimsg_setu16(s, 2, mp->ap->applid);
32796 capimsg_setu8 (s, 4, CAPI_DATA_B3);
32797 capimsg_setu8 (s, 5, CAPI_RESP);
32798- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
32799+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
32800 capimsg_setu32(s, 8, mp->ncci);
32801 capimsg_setu16(s, 12, datahandle);
32802 }
32803@@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
32804 mp->outbytes -= len;
32805 spin_unlock_bh(&mp->outlock);
32806
32807- datahandle = atomic_inc_return(&mp->datahandle);
32808+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
32809 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
32810 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32811 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32812 capimsg_setu16(skb->data, 2, mp->ap->applid);
32813 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
32814 capimsg_setu8 (skb->data, 5, CAPI_REQ);
32815- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
32816+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
32817 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
32818 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
32819 capimsg_setu16(skb->data, 16, len); /* Data length */
32820diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
32821index 821f7ac..28d4030 100644
32822--- a/drivers/isdn/hardware/avm/b1.c
32823+++ b/drivers/isdn/hardware/avm/b1.c
32824@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
32825 }
32826 if (left) {
32827 if (t4file->user) {
32828- if (copy_from_user(buf, dp, left))
32829+ if (left > sizeof buf || copy_from_user(buf, dp, left))
32830 return -EFAULT;
32831 } else {
32832 memcpy(buf, dp, left);
32833@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
32834 }
32835 if (left) {
32836 if (config->user) {
32837- if (copy_from_user(buf, dp, left))
32838+ if (left > sizeof buf || copy_from_user(buf, dp, left))
32839 return -EFAULT;
32840 } else {
32841 memcpy(buf, dp, left);
32842diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
32843index dd6b53a..19d9ee6 100644
32844--- a/drivers/isdn/hardware/eicon/divasync.h
32845+++ b/drivers/isdn/hardware/eicon/divasync.h
32846@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
32847 } diva_didd_add_adapter_t;
32848 typedef struct _diva_didd_remove_adapter {
32849 IDI_CALL p_request;
32850-} diva_didd_remove_adapter_t;
32851+} __no_const diva_didd_remove_adapter_t;
32852 typedef struct _diva_didd_read_adapter_array {
32853 void *buffer;
32854 dword length;
32855diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
32856index d303e65..28bcb7b 100644
32857--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
32858+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
32859@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
32860 typedef struct _diva_os_idi_adapter_interface {
32861 diva_init_card_proc_t cleanup_adapter_proc;
32862 diva_cmd_card_proc_t cmd_proc;
32863-} diva_os_idi_adapter_interface_t;
32864+} __no_const diva_os_idi_adapter_interface_t;
32865
32866 typedef struct _diva_os_xdi_adapter {
32867 struct list_head link;
32868diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
32869index e74df7c..03a03ba 100644
32870--- a/drivers/isdn/icn/icn.c
32871+++ b/drivers/isdn/icn/icn.c
32872@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
32873 if (count > len)
32874 count = len;
32875 if (user) {
32876- if (copy_from_user(msg, buf, count))
32877+ if (count > sizeof msg || copy_from_user(msg, buf, count))
32878 return -EFAULT;
32879 } else
32880 memcpy(msg, buf, count);
32881diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
32882index 8bc4915..4cc6a2e 100644
32883--- a/drivers/leds/leds-mc13783.c
32884+++ b/drivers/leds/leds-mc13783.c
32885@@ -280,7 +280,7 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
32886 return -EINVAL;
32887 }
32888
32889- led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
32890+ led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
32891 if (led == NULL) {
32892 dev_err(&pdev->dev, "failed to alloc memory\n");
32893 return -ENOMEM;
32894diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
32895index b5fdcb7..5b6c59f 100644
32896--- a/drivers/lguest/core.c
32897+++ b/drivers/lguest/core.c
32898@@ -92,9 +92,17 @@ static __init int map_switcher(void)
32899 * it's worked so far. The end address needs +1 because __get_vm_area
32900 * allocates an extra guard page, so we need space for that.
32901 */
32902+
32903+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32904+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32905+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
32906+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32907+#else
32908 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32909 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
32910 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32911+#endif
32912+
32913 if (!switcher_vma) {
32914 err = -ENOMEM;
32915 printk("lguest: could not map switcher pages high\n");
32916@@ -119,7 +127,7 @@ static __init int map_switcher(void)
32917 * Now the Switcher is mapped at the right address, we can't fail!
32918 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
32919 */
32920- memcpy(switcher_vma->addr, start_switcher_text,
32921+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
32922 end_switcher_text - start_switcher_text);
32923
32924 printk(KERN_INFO "lguest: mapped switcher at %p\n",
32925diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
32926index 3980903..ce25c5e 100644
32927--- a/drivers/lguest/x86/core.c
32928+++ b/drivers/lguest/x86/core.c
32929@@ -59,7 +59,7 @@ static struct {
32930 /* Offset from where switcher.S was compiled to where we've copied it */
32931 static unsigned long switcher_offset(void)
32932 {
32933- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
32934+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
32935 }
32936
32937 /* This cpu's struct lguest_pages. */
32938@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
32939 * These copies are pretty cheap, so we do them unconditionally: */
32940 /* Save the current Host top-level page directory.
32941 */
32942+
32943+#ifdef CONFIG_PAX_PER_CPU_PGD
32944+ pages->state.host_cr3 = read_cr3();
32945+#else
32946 pages->state.host_cr3 = __pa(current->mm->pgd);
32947+#endif
32948+
32949 /*
32950 * Set up the Guest's page tables to see this CPU's pages (and no
32951 * other CPU's pages).
32952@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
32953 * compiled-in switcher code and the high-mapped copy we just made.
32954 */
32955 for (i = 0; i < IDT_ENTRIES; i++)
32956- default_idt_entries[i] += switcher_offset();
32957+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
32958
32959 /*
32960 * Set up the Switcher's per-cpu areas.
32961@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
32962 * it will be undisturbed when we switch. To change %cs and jump we
32963 * need this structure to feed to Intel's "lcall" instruction.
32964 */
32965- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
32966+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
32967 lguest_entry.segment = LGUEST_CS;
32968
32969 /*
32970diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
32971index 40634b0..4f5855e 100644
32972--- a/drivers/lguest/x86/switcher_32.S
32973+++ b/drivers/lguest/x86/switcher_32.S
32974@@ -87,6 +87,7 @@
32975 #include <asm/page.h>
32976 #include <asm/segment.h>
32977 #include <asm/lguest.h>
32978+#include <asm/processor-flags.h>
32979
32980 // We mark the start of the code to copy
32981 // It's placed in .text tho it's never run here
32982@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
32983 // Changes type when we load it: damn Intel!
32984 // For after we switch over our page tables
32985 // That entry will be read-only: we'd crash.
32986+
32987+#ifdef CONFIG_PAX_KERNEXEC
32988+ mov %cr0, %edx
32989+ xor $X86_CR0_WP, %edx
32990+ mov %edx, %cr0
32991+#endif
32992+
32993 movl $(GDT_ENTRY_TSS*8), %edx
32994 ltr %dx
32995
32996@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
32997 // Let's clear it again for our return.
32998 // The GDT descriptor of the Host
32999 // Points to the table after two "size" bytes
33000- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
33001+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
33002 // Clear "used" from type field (byte 5, bit 2)
33003- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
33004+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
33005+
33006+#ifdef CONFIG_PAX_KERNEXEC
33007+ mov %cr0, %eax
33008+ xor $X86_CR0_WP, %eax
33009+ mov %eax, %cr0
33010+#endif
33011
33012 // Once our page table's switched, the Guest is live!
33013 // The Host fades as we run this final step.
33014@@ -295,13 +309,12 @@ deliver_to_host:
33015 // I consulted gcc, and it gave
33016 // These instructions, which I gladly credit:
33017 leal (%edx,%ebx,8), %eax
33018- movzwl (%eax),%edx
33019- movl 4(%eax), %eax
33020- xorw %ax, %ax
33021- orl %eax, %edx
33022+ movl 4(%eax), %edx
33023+ movw (%eax), %dx
33024 // Now the address of the handler's in %edx
33025 // We call it now: its "iret" drops us home.
33026- jmp *%edx
33027+ ljmp $__KERNEL_CS, $1f
33028+1: jmp *%edx
33029
33030 // Every interrupt can come to us here
33031 // But we must truly tell each apart.
33032diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
33033index 20e5c2c..9e849a9 100644
33034--- a/drivers/macintosh/macio_asic.c
33035+++ b/drivers/macintosh/macio_asic.c
33036@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
33037 * MacIO is matched against any Apple ID, it's probe() function
33038 * will then decide wether it applies or not
33039 */
33040-static const struct pci_device_id __devinitdata pci_ids [] = { {
33041+static const struct pci_device_id __devinitconst pci_ids [] = { {
33042 .vendor = PCI_VENDOR_ID_APPLE,
33043 .device = PCI_ANY_ID,
33044 .subvendor = PCI_ANY_ID,
33045diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
33046index 17e2b47..bcbeec4 100644
33047--- a/drivers/md/bitmap.c
33048+++ b/drivers/md/bitmap.c
33049@@ -1823,7 +1823,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
33050 chunk_kb ? "KB" : "B");
33051 if (bitmap->file) {
33052 seq_printf(seq, ", file: ");
33053- seq_path(seq, &bitmap->file->f_path, " \t\n");
33054+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
33055 }
33056
33057 seq_printf(seq, "\n");
33058diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
33059index a1a3e6d..1918bfc 100644
33060--- a/drivers/md/dm-ioctl.c
33061+++ b/drivers/md/dm-ioctl.c
33062@@ -1590,7 +1590,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
33063 cmd == DM_LIST_VERSIONS_CMD)
33064 return 0;
33065
33066- if ((cmd == DM_DEV_CREATE_CMD)) {
33067+ if (cmd == DM_DEV_CREATE_CMD) {
33068 if (!*param->name) {
33069 DMWARN("name not supplied when creating device");
33070 return -EINVAL;
33071diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
33072index d039de8..0cf5b87 100644
33073--- a/drivers/md/dm-raid1.c
33074+++ b/drivers/md/dm-raid1.c
33075@@ -40,7 +40,7 @@ enum dm_raid1_error {
33076
33077 struct mirror {
33078 struct mirror_set *ms;
33079- atomic_t error_count;
33080+ atomic_unchecked_t error_count;
33081 unsigned long error_type;
33082 struct dm_dev *dev;
33083 sector_t offset;
33084@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
33085 struct mirror *m;
33086
33087 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
33088- if (!atomic_read(&m->error_count))
33089+ if (!atomic_read_unchecked(&m->error_count))
33090 return m;
33091
33092 return NULL;
33093@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
33094 * simple way to tell if a device has encountered
33095 * errors.
33096 */
33097- atomic_inc(&m->error_count);
33098+ atomic_inc_unchecked(&m->error_count);
33099
33100 if (test_and_set_bit(error_type, &m->error_type))
33101 return;
33102@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
33103 struct mirror *m = get_default_mirror(ms);
33104
33105 do {
33106- if (likely(!atomic_read(&m->error_count)))
33107+ if (likely(!atomic_read_unchecked(&m->error_count)))
33108 return m;
33109
33110 if (m-- == ms->mirror)
33111@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
33112 {
33113 struct mirror *default_mirror = get_default_mirror(m->ms);
33114
33115- return !atomic_read(&default_mirror->error_count);
33116+ return !atomic_read_unchecked(&default_mirror->error_count);
33117 }
33118
33119 static int mirror_available(struct mirror_set *ms, struct bio *bio)
33120@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
33121 */
33122 if (likely(region_in_sync(ms, region, 1)))
33123 m = choose_mirror(ms, bio->bi_sector);
33124- else if (m && atomic_read(&m->error_count))
33125+ else if (m && atomic_read_unchecked(&m->error_count))
33126 m = NULL;
33127
33128 if (likely(m))
33129@@ -938,7 +938,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
33130 }
33131
33132 ms->mirror[mirror].ms = ms;
33133- atomic_set(&(ms->mirror[mirror].error_count), 0);
33134+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
33135 ms->mirror[mirror].error_type = 0;
33136 ms->mirror[mirror].offset = offset;
33137
33138@@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
33139 */
33140 static char device_status_char(struct mirror *m)
33141 {
33142- if (!atomic_read(&(m->error_count)))
33143+ if (!atomic_read_unchecked(&(m->error_count)))
33144 return 'A';
33145
33146 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
33147diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
33148index 35c94ff..20d4c17 100644
33149--- a/drivers/md/dm-stripe.c
33150+++ b/drivers/md/dm-stripe.c
33151@@ -20,7 +20,7 @@ struct stripe {
33152 struct dm_dev *dev;
33153 sector_t physical_start;
33154
33155- atomic_t error_count;
33156+ atomic_unchecked_t error_count;
33157 };
33158
33159 struct stripe_c {
33160@@ -193,7 +193,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
33161 kfree(sc);
33162 return r;
33163 }
33164- atomic_set(&(sc->stripe[i].error_count), 0);
33165+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
33166 }
33167
33168 ti->private = sc;
33169@@ -315,7 +315,7 @@ static int stripe_status(struct dm_target *ti,
33170 DMEMIT("%d ", sc->stripes);
33171 for (i = 0; i < sc->stripes; i++) {
33172 DMEMIT("%s ", sc->stripe[i].dev->name);
33173- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
33174+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
33175 'D' : 'A';
33176 }
33177 buffer[i] = '\0';
33178@@ -362,8 +362,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
33179 */
33180 for (i = 0; i < sc->stripes; i++)
33181 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
33182- atomic_inc(&(sc->stripe[i].error_count));
33183- if (atomic_read(&(sc->stripe[i].error_count)) <
33184+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
33185+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
33186 DM_IO_ERROR_THRESHOLD)
33187 schedule_work(&sc->trigger_event);
33188 }
33189diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
33190index 2e227fb..44ead1f 100644
33191--- a/drivers/md/dm-table.c
33192+++ b/drivers/md/dm-table.c
33193@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
33194 if (!dev_size)
33195 return 0;
33196
33197- if ((start >= dev_size) || (start + len > dev_size)) {
33198+ if ((start >= dev_size) || (len > dev_size - start)) {
33199 DMWARN("%s: %s too small for target: "
33200 "start=%llu, len=%llu, dev_size=%llu",
33201 dm_device_name(ti->table->md), bdevname(bdev, b),
33202diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
33203index 737d388..811ad5a 100644
33204--- a/drivers/md/dm-thin-metadata.c
33205+++ b/drivers/md/dm-thin-metadata.c
33206@@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33207
33208 pmd->info.tm = tm;
33209 pmd->info.levels = 2;
33210- pmd->info.value_type.context = pmd->data_sm;
33211+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33212 pmd->info.value_type.size = sizeof(__le64);
33213 pmd->info.value_type.inc = data_block_inc;
33214 pmd->info.value_type.dec = data_block_dec;
33215@@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33216
33217 pmd->bl_info.tm = tm;
33218 pmd->bl_info.levels = 1;
33219- pmd->bl_info.value_type.context = pmd->data_sm;
33220+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33221 pmd->bl_info.value_type.size = sizeof(__le64);
33222 pmd->bl_info.value_type.inc = data_block_inc;
33223 pmd->bl_info.value_type.dec = data_block_dec;
33224diff --git a/drivers/md/dm.c b/drivers/md/dm.c
33225index e24143c..ce2f21a1 100644
33226--- a/drivers/md/dm.c
33227+++ b/drivers/md/dm.c
33228@@ -176,9 +176,9 @@ struct mapped_device {
33229 /*
33230 * Event handling.
33231 */
33232- atomic_t event_nr;
33233+ atomic_unchecked_t event_nr;
33234 wait_queue_head_t eventq;
33235- atomic_t uevent_seq;
33236+ atomic_unchecked_t uevent_seq;
33237 struct list_head uevent_list;
33238 spinlock_t uevent_lock; /* Protect access to uevent_list */
33239
33240@@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
33241 rwlock_init(&md->map_lock);
33242 atomic_set(&md->holders, 1);
33243 atomic_set(&md->open_count, 0);
33244- atomic_set(&md->event_nr, 0);
33245- atomic_set(&md->uevent_seq, 0);
33246+ atomic_set_unchecked(&md->event_nr, 0);
33247+ atomic_set_unchecked(&md->uevent_seq, 0);
33248 INIT_LIST_HEAD(&md->uevent_list);
33249 spin_lock_init(&md->uevent_lock);
33250
33251@@ -1980,7 +1980,7 @@ static void event_callback(void *context)
33252
33253 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
33254
33255- atomic_inc(&md->event_nr);
33256+ atomic_inc_unchecked(&md->event_nr);
33257 wake_up(&md->eventq);
33258 }
33259
33260@@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
33261
33262 uint32_t dm_next_uevent_seq(struct mapped_device *md)
33263 {
33264- return atomic_add_return(1, &md->uevent_seq);
33265+ return atomic_add_return_unchecked(1, &md->uevent_seq);
33266 }
33267
33268 uint32_t dm_get_event_nr(struct mapped_device *md)
33269 {
33270- return atomic_read(&md->event_nr);
33271+ return atomic_read_unchecked(&md->event_nr);
33272 }
33273
33274 int dm_wait_event(struct mapped_device *md, int event_nr)
33275 {
33276 return wait_event_interruptible(md->eventq,
33277- (event_nr != atomic_read(&md->event_nr)));
33278+ (event_nr != atomic_read_unchecked(&md->event_nr)));
33279 }
33280
33281 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
33282diff --git a/drivers/md/md.c b/drivers/md/md.c
33283index 2b30ffd..bf789ce 100644
33284--- a/drivers/md/md.c
33285+++ b/drivers/md/md.c
33286@@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
33287 * start build, activate spare
33288 */
33289 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
33290-static atomic_t md_event_count;
33291+static atomic_unchecked_t md_event_count;
33292 void md_new_event(struct mddev *mddev)
33293 {
33294- atomic_inc(&md_event_count);
33295+ atomic_inc_unchecked(&md_event_count);
33296 wake_up(&md_event_waiters);
33297 }
33298 EXPORT_SYMBOL_GPL(md_new_event);
33299@@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
33300 */
33301 static void md_new_event_inintr(struct mddev *mddev)
33302 {
33303- atomic_inc(&md_event_count);
33304+ atomic_inc_unchecked(&md_event_count);
33305 wake_up(&md_event_waiters);
33306 }
33307
33308@@ -1526,7 +1526,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
33309
33310 rdev->preferred_minor = 0xffff;
33311 rdev->data_offset = le64_to_cpu(sb->data_offset);
33312- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33313+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33314
33315 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
33316 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
33317@@ -1745,7 +1745,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
33318 else
33319 sb->resync_offset = cpu_to_le64(0);
33320
33321- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
33322+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
33323
33324 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
33325 sb->size = cpu_to_le64(mddev->dev_sectors);
33326@@ -2691,7 +2691,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
33327 static ssize_t
33328 errors_show(struct md_rdev *rdev, char *page)
33329 {
33330- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
33331+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
33332 }
33333
33334 static ssize_t
33335@@ -2700,7 +2700,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
33336 char *e;
33337 unsigned long n = simple_strtoul(buf, &e, 10);
33338 if (*buf && (*e == 0 || *e == '\n')) {
33339- atomic_set(&rdev->corrected_errors, n);
33340+ atomic_set_unchecked(&rdev->corrected_errors, n);
33341 return len;
33342 }
33343 return -EINVAL;
33344@@ -3086,8 +3086,8 @@ int md_rdev_init(struct md_rdev *rdev)
33345 rdev->sb_loaded = 0;
33346 rdev->bb_page = NULL;
33347 atomic_set(&rdev->nr_pending, 0);
33348- atomic_set(&rdev->read_errors, 0);
33349- atomic_set(&rdev->corrected_errors, 0);
33350+ atomic_set_unchecked(&rdev->read_errors, 0);
33351+ atomic_set_unchecked(&rdev->corrected_errors, 0);
33352
33353 INIT_LIST_HEAD(&rdev->same_set);
33354 init_waitqueue_head(&rdev->blocked_wait);
33355@@ -6738,7 +6738,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
33356
33357 spin_unlock(&pers_lock);
33358 seq_printf(seq, "\n");
33359- seq->poll_event = atomic_read(&md_event_count);
33360+ seq->poll_event = atomic_read_unchecked(&md_event_count);
33361 return 0;
33362 }
33363 if (v == (void*)2) {
33364@@ -6841,7 +6841,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
33365 return error;
33366
33367 seq = file->private_data;
33368- seq->poll_event = atomic_read(&md_event_count);
33369+ seq->poll_event = atomic_read_unchecked(&md_event_count);
33370 return error;
33371 }
33372
33373@@ -6855,7 +6855,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
33374 /* always allow read */
33375 mask = POLLIN | POLLRDNORM;
33376
33377- if (seq->poll_event != atomic_read(&md_event_count))
33378+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
33379 mask |= POLLERR | POLLPRI;
33380 return mask;
33381 }
33382@@ -6899,7 +6899,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
33383 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
33384 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
33385 (int)part_stat_read(&disk->part0, sectors[1]) -
33386- atomic_read(&disk->sync_io);
33387+ atomic_read_unchecked(&disk->sync_io);
33388 /* sync IO will cause sync_io to increase before the disk_stats
33389 * as sync_io is counted when a request starts, and
33390 * disk_stats is counted when it completes.
33391diff --git a/drivers/md/md.h b/drivers/md/md.h
33392index 1c2063c..9639970 100644
33393--- a/drivers/md/md.h
33394+++ b/drivers/md/md.h
33395@@ -93,13 +93,13 @@ struct md_rdev {
33396 * only maintained for arrays that
33397 * support hot removal
33398 */
33399- atomic_t read_errors; /* number of consecutive read errors that
33400+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
33401 * we have tried to ignore.
33402 */
33403 struct timespec last_read_error; /* monotonic time since our
33404 * last read error
33405 */
33406- atomic_t corrected_errors; /* number of corrected read errors,
33407+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
33408 * for reporting to userspace and storing
33409 * in superblock.
33410 */
33411@@ -429,7 +429,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
33412
33413 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
33414 {
33415- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33416+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33417 }
33418
33419 struct md_personality
33420diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
33421index 50ed53b..4f29d7d 100644
33422--- a/drivers/md/persistent-data/dm-space-map-checker.c
33423+++ b/drivers/md/persistent-data/dm-space-map-checker.c
33424@@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
33425 /*----------------------------------------------------------------*/
33426
33427 struct sm_checker {
33428- struct dm_space_map sm;
33429+ dm_space_map_no_const sm;
33430
33431 struct count_array old_counts;
33432 struct count_array counts;
33433diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
33434index fc469ba..2d91555 100644
33435--- a/drivers/md/persistent-data/dm-space-map-disk.c
33436+++ b/drivers/md/persistent-data/dm-space-map-disk.c
33437@@ -23,7 +23,7 @@
33438 * Space map interface.
33439 */
33440 struct sm_disk {
33441- struct dm_space_map sm;
33442+ dm_space_map_no_const sm;
33443
33444 struct ll_disk ll;
33445 struct ll_disk old_ll;
33446diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
33447index e89ae5e..062e4c2 100644
33448--- a/drivers/md/persistent-data/dm-space-map-metadata.c
33449+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
33450@@ -43,7 +43,7 @@ struct block_op {
33451 };
33452
33453 struct sm_metadata {
33454- struct dm_space_map sm;
33455+ dm_space_map_no_const sm;
33456
33457 struct ll_disk ll;
33458 struct ll_disk old_ll;
33459diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
33460index 1cbfc6b..56e1dbb 100644
33461--- a/drivers/md/persistent-data/dm-space-map.h
33462+++ b/drivers/md/persistent-data/dm-space-map.h
33463@@ -60,6 +60,7 @@ struct dm_space_map {
33464 int (*root_size)(struct dm_space_map *sm, size_t *result);
33465 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
33466 };
33467+typedef struct dm_space_map __no_const dm_space_map_no_const;
33468
33469 /*----------------------------------------------------------------*/
33470
33471diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
33472index d7e9577..faa512f2 100644
33473--- a/drivers/md/raid1.c
33474+++ b/drivers/md/raid1.c
33475@@ -1688,7 +1688,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
33476 if (r1_sync_page_io(rdev, sect, s,
33477 bio->bi_io_vec[idx].bv_page,
33478 READ) != 0)
33479- atomic_add(s, &rdev->corrected_errors);
33480+ atomic_add_unchecked(s, &rdev->corrected_errors);
33481 }
33482 sectors -= s;
33483 sect += s;
33484@@ -1902,7 +1902,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
33485 test_bit(In_sync, &rdev->flags)) {
33486 if (r1_sync_page_io(rdev, sect, s,
33487 conf->tmppage, READ)) {
33488- atomic_add(s, &rdev->corrected_errors);
33489+ atomic_add_unchecked(s, &rdev->corrected_errors);
33490 printk(KERN_INFO
33491 "md/raid1:%s: read error corrected "
33492 "(%d sectors at %llu on %s)\n",
33493diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
33494index d037adb..ed17dc9 100644
33495--- a/drivers/md/raid10.c
33496+++ b/drivers/md/raid10.c
33497@@ -1684,7 +1684,7 @@ static void end_sync_read(struct bio *bio, int error)
33498 /* The write handler will notice the lack of
33499 * R10BIO_Uptodate and record any errors etc
33500 */
33501- atomic_add(r10_bio->sectors,
33502+ atomic_add_unchecked(r10_bio->sectors,
33503 &conf->mirrors[d].rdev->corrected_errors);
33504
33505 /* for reconstruct, we always reschedule after a read.
33506@@ -2033,7 +2033,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33507 {
33508 struct timespec cur_time_mon;
33509 unsigned long hours_since_last;
33510- unsigned int read_errors = atomic_read(&rdev->read_errors);
33511+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
33512
33513 ktime_get_ts(&cur_time_mon);
33514
33515@@ -2055,9 +2055,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33516 * overflowing the shift of read_errors by hours_since_last.
33517 */
33518 if (hours_since_last >= 8 * sizeof(read_errors))
33519- atomic_set(&rdev->read_errors, 0);
33520+ atomic_set_unchecked(&rdev->read_errors, 0);
33521 else
33522- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
33523+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
33524 }
33525
33526 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
33527@@ -2111,8 +2111,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33528 return;
33529
33530 check_decay_read_errors(mddev, rdev);
33531- atomic_inc(&rdev->read_errors);
33532- if (atomic_read(&rdev->read_errors) > max_read_errors) {
33533+ atomic_inc_unchecked(&rdev->read_errors);
33534+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
33535 char b[BDEVNAME_SIZE];
33536 bdevname(rdev->bdev, b);
33537
33538@@ -2120,7 +2120,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33539 "md/raid10:%s: %s: Raid device exceeded "
33540 "read_error threshold [cur %d:max %d]\n",
33541 mdname(mddev), b,
33542- atomic_read(&rdev->read_errors), max_read_errors);
33543+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
33544 printk(KERN_NOTICE
33545 "md/raid10:%s: %s: Failing raid device\n",
33546 mdname(mddev), b);
33547@@ -2271,7 +2271,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33548 (unsigned long long)(
33549 sect + rdev->data_offset),
33550 bdevname(rdev->bdev, b));
33551- atomic_add(s, &rdev->corrected_errors);
33552+ atomic_add_unchecked(s, &rdev->corrected_errors);
33553 }
33554
33555 rdev_dec_pending(rdev, mddev);
33556diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
33557index f351422..85c01bb 100644
33558--- a/drivers/md/raid5.c
33559+++ b/drivers/md/raid5.c
33560@@ -1686,18 +1686,18 @@ static void raid5_end_read_request(struct bio * bi, int error)
33561 (unsigned long long)(sh->sector
33562 + rdev->data_offset),
33563 bdevname(rdev->bdev, b));
33564- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
33565+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
33566 clear_bit(R5_ReadError, &sh->dev[i].flags);
33567 clear_bit(R5_ReWrite, &sh->dev[i].flags);
33568 }
33569- if (atomic_read(&rdev->read_errors))
33570- atomic_set(&rdev->read_errors, 0);
33571+ if (atomic_read_unchecked(&rdev->read_errors))
33572+ atomic_set_unchecked(&rdev->read_errors, 0);
33573 } else {
33574 const char *bdn = bdevname(rdev->bdev, b);
33575 int retry = 0;
33576
33577 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
33578- atomic_inc(&rdev->read_errors);
33579+ atomic_inc_unchecked(&rdev->read_errors);
33580 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
33581 printk_ratelimited(
33582 KERN_WARNING
33583@@ -1726,7 +1726,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
33584 (unsigned long long)(sh->sector
33585 + rdev->data_offset),
33586 bdn);
33587- else if (atomic_read(&rdev->read_errors)
33588+ else if (atomic_read_unchecked(&rdev->read_errors)
33589 > conf->max_nr_stripes)
33590 printk(KERN_WARNING
33591 "md/raid:%s: Too many read errors, failing device %s.\n",
33592diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
33593index d88c4aa..17c80b1 100644
33594--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
33595+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
33596@@ -1679,7 +1679,7 @@ static struct ddb_info ddb_v6 = {
33597 .subvendor = _subvend, .subdevice = _subdev, \
33598 .driver_data = (unsigned long)&_driverdata }
33599
33600-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
33601+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
33602 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
33603 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
33604 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
33605diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
33606index a7d876f..8c21b61 100644
33607--- a/drivers/media/dvb/dvb-core/dvb_demux.h
33608+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
33609@@ -73,7 +73,7 @@ struct dvb_demux_feed {
33610 union {
33611 dmx_ts_cb ts;
33612 dmx_section_cb sec;
33613- } cb;
33614+ } __no_const cb;
33615
33616 struct dvb_demux *demux;
33617 void *priv;
33618diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
33619index 00a6732..70a682e 100644
33620--- a/drivers/media/dvb/dvb-core/dvbdev.c
33621+++ b/drivers/media/dvb/dvb-core/dvbdev.c
33622@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
33623 const struct dvb_device *template, void *priv, int type)
33624 {
33625 struct dvb_device *dvbdev;
33626- struct file_operations *dvbdevfops;
33627+ file_operations_no_const *dvbdevfops;
33628 struct device *clsdev;
33629 int minor;
33630 int id;
33631diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
33632index 3940bb0..fb3952a 100644
33633--- a/drivers/media/dvb/dvb-usb/cxusb.c
33634+++ b/drivers/media/dvb/dvb-usb/cxusb.c
33635@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
33636
33637 struct dib0700_adapter_state {
33638 int (*set_param_save) (struct dvb_frontend *);
33639-};
33640+} __no_const;
33641
33642 static int dib7070_set_param_override(struct dvb_frontend *fe)
33643 {
33644diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
33645index 451c5a7..649f711 100644
33646--- a/drivers/media/dvb/dvb-usb/dw2102.c
33647+++ b/drivers/media/dvb/dvb-usb/dw2102.c
33648@@ -95,7 +95,7 @@ struct su3000_state {
33649
33650 struct s6x0_state {
33651 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
33652-};
33653+} __no_const;
33654
33655 /* debug */
33656 static int dvb_usb_dw2102_debug;
33657diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
33658index 404f63a..4796533 100644
33659--- a/drivers/media/dvb/frontends/dib3000.h
33660+++ b/drivers/media/dvb/frontends/dib3000.h
33661@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
33662 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
33663 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
33664 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
33665-};
33666+} __no_const;
33667
33668 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
33669 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
33670diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
33671index 7539a5d..06531a6 100644
33672--- a/drivers/media/dvb/ngene/ngene-cards.c
33673+++ b/drivers/media/dvb/ngene/ngene-cards.c
33674@@ -478,7 +478,7 @@ static struct ngene_info ngene_info_m780 = {
33675
33676 /****************************************************************************/
33677
33678-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
33679+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
33680 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
33681 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
33682 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
33683diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
33684index 16a089f..1661b11 100644
33685--- a/drivers/media/radio/radio-cadet.c
33686+++ b/drivers/media/radio/radio-cadet.c
33687@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33688 unsigned char readbuf[RDS_BUFFER];
33689 int i = 0;
33690
33691+ if (count > RDS_BUFFER)
33692+ return -EFAULT;
33693 mutex_lock(&dev->lock);
33694 if (dev->rdsstat == 0) {
33695 dev->rdsstat = 1;
33696@@ -347,7 +349,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33697 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
33698 mutex_unlock(&dev->lock);
33699
33700- if (copy_to_user(data, readbuf, i))
33701+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
33702 return -EFAULT;
33703 return i;
33704 }
33705diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
33706index 9cde353..8c6a1c3 100644
33707--- a/drivers/media/video/au0828/au0828.h
33708+++ b/drivers/media/video/au0828/au0828.h
33709@@ -191,7 +191,7 @@ struct au0828_dev {
33710
33711 /* I2C */
33712 struct i2c_adapter i2c_adap;
33713- struct i2c_algorithm i2c_algo;
33714+ i2c_algorithm_no_const i2c_algo;
33715 struct i2c_client i2c_client;
33716 u32 i2c_rc;
33717
33718diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
33719index 04bf662..e0ac026 100644
33720--- a/drivers/media/video/cx88/cx88-alsa.c
33721+++ b/drivers/media/video/cx88/cx88-alsa.c
33722@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
33723 * Only boards with eeprom and byte 1 at eeprom=1 have it
33724 */
33725
33726-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
33727+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
33728 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33729 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33730 {0, }
33731diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
33732index 88cf9d9..bbc4b2c 100644
33733--- a/drivers/media/video/omap/omap_vout.c
33734+++ b/drivers/media/video/omap/omap_vout.c
33735@@ -64,7 +64,6 @@ enum omap_vout_channels {
33736 OMAP_VIDEO2,
33737 };
33738
33739-static struct videobuf_queue_ops video_vbq_ops;
33740 /* Variables configurable through module params*/
33741 static u32 video1_numbuffers = 3;
33742 static u32 video2_numbuffers = 3;
33743@@ -1000,6 +999,12 @@ static int omap_vout_open(struct file *file)
33744 {
33745 struct videobuf_queue *q;
33746 struct omap_vout_device *vout = NULL;
33747+ static struct videobuf_queue_ops video_vbq_ops = {
33748+ .buf_setup = omap_vout_buffer_setup,
33749+ .buf_prepare = omap_vout_buffer_prepare,
33750+ .buf_release = omap_vout_buffer_release,
33751+ .buf_queue = omap_vout_buffer_queue,
33752+ };
33753
33754 vout = video_drvdata(file);
33755 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
33756@@ -1017,10 +1022,6 @@ static int omap_vout_open(struct file *file)
33757 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
33758
33759 q = &vout->vbq;
33760- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
33761- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
33762- video_vbq_ops.buf_release = omap_vout_buffer_release;
33763- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
33764 spin_lock_init(&vout->vbq_lock);
33765
33766 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
33767diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33768index 305e6aa..0143317 100644
33769--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33770+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33771@@ -196,7 +196,7 @@ struct pvr2_hdw {
33772
33773 /* I2C stuff */
33774 struct i2c_adapter i2c_adap;
33775- struct i2c_algorithm i2c_algo;
33776+ i2c_algorithm_no_const i2c_algo;
33777 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
33778 int i2c_cx25840_hack_state;
33779 int i2c_linked;
33780diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
33781index 02194c0..091733b 100644
33782--- a/drivers/media/video/timblogiw.c
33783+++ b/drivers/media/video/timblogiw.c
33784@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
33785
33786 /* Platform device functions */
33787
33788-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33789+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
33790 .vidioc_querycap = timblogiw_querycap,
33791 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
33792 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
33793@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33794 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
33795 };
33796
33797-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
33798+static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
33799 .owner = THIS_MODULE,
33800 .open = timblogiw_open,
33801 .release = timblogiw_close,
33802diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
33803index a5c591f..db692a3 100644
33804--- a/drivers/message/fusion/mptbase.c
33805+++ b/drivers/message/fusion/mptbase.c
33806@@ -6754,8 +6754,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
33807 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
33808 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
33809
33810+#ifdef CONFIG_GRKERNSEC_HIDESYM
33811+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
33812+#else
33813 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
33814 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
33815+#endif
33816+
33817 /*
33818 * Rounding UP to nearest 4-kB boundary here...
33819 */
33820diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
33821index 551262e..7551198 100644
33822--- a/drivers/message/fusion/mptsas.c
33823+++ b/drivers/message/fusion/mptsas.c
33824@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
33825 return 0;
33826 }
33827
33828+static inline void
33829+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33830+{
33831+ if (phy_info->port_details) {
33832+ phy_info->port_details->rphy = rphy;
33833+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33834+ ioc->name, rphy));
33835+ }
33836+
33837+ if (rphy) {
33838+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33839+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33840+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33841+ ioc->name, rphy, rphy->dev.release));
33842+ }
33843+}
33844+
33845 /* no mutex */
33846 static void
33847 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
33848@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
33849 return NULL;
33850 }
33851
33852-static inline void
33853-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33854-{
33855- if (phy_info->port_details) {
33856- phy_info->port_details->rphy = rphy;
33857- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33858- ioc->name, rphy));
33859- }
33860-
33861- if (rphy) {
33862- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33863- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33864- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33865- ioc->name, rphy, rphy->dev.release));
33866- }
33867-}
33868-
33869 static inline struct sas_port *
33870 mptsas_get_port(struct mptsas_phyinfo *phy_info)
33871 {
33872diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
33873index 0c3ced7..1fe34ec 100644
33874--- a/drivers/message/fusion/mptscsih.c
33875+++ b/drivers/message/fusion/mptscsih.c
33876@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
33877
33878 h = shost_priv(SChost);
33879
33880- if (h) {
33881- if (h->info_kbuf == NULL)
33882- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33883- return h->info_kbuf;
33884- h->info_kbuf[0] = '\0';
33885+ if (!h)
33886+ return NULL;
33887
33888- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33889- h->info_kbuf[size-1] = '\0';
33890- }
33891+ if (h->info_kbuf == NULL)
33892+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33893+ return h->info_kbuf;
33894+ h->info_kbuf[0] = '\0';
33895+
33896+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33897+ h->info_kbuf[size-1] = '\0';
33898
33899 return h->info_kbuf;
33900 }
33901diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
33902index 6d115c7..58ff7fd 100644
33903--- a/drivers/message/i2o/i2o_proc.c
33904+++ b/drivers/message/i2o/i2o_proc.c
33905@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
33906 "Array Controller Device"
33907 };
33908
33909-static char *chtostr(u8 * chars, int n)
33910-{
33911- char tmp[256];
33912- tmp[0] = 0;
33913- return strncat(tmp, (char *)chars, n);
33914-}
33915-
33916 static int i2o_report_query_status(struct seq_file *seq, int block_status,
33917 char *group)
33918 {
33919@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
33920
33921 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
33922 seq_printf(seq, "%-#8x", ddm_table.module_id);
33923- seq_printf(seq, "%-29s",
33924- chtostr(ddm_table.module_name_version, 28));
33925+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
33926 seq_printf(seq, "%9d ", ddm_table.data_size);
33927 seq_printf(seq, "%8d", ddm_table.code_size);
33928
33929@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
33930
33931 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
33932 seq_printf(seq, "%-#8x", dst->module_id);
33933- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
33934- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
33935+ seq_printf(seq, "%-.28s", dst->module_name_version);
33936+ seq_printf(seq, "%-.8s", dst->date);
33937 seq_printf(seq, "%8d ", dst->module_size);
33938 seq_printf(seq, "%8d ", dst->mpb_size);
33939 seq_printf(seq, "0x%04x", dst->module_flags);
33940@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
33941 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
33942 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
33943 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
33944- seq_printf(seq, "Vendor info : %s\n",
33945- chtostr((u8 *) (work32 + 2), 16));
33946- seq_printf(seq, "Product info : %s\n",
33947- chtostr((u8 *) (work32 + 6), 16));
33948- seq_printf(seq, "Description : %s\n",
33949- chtostr((u8 *) (work32 + 10), 16));
33950- seq_printf(seq, "Product rev. : %s\n",
33951- chtostr((u8 *) (work32 + 14), 8));
33952+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
33953+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
33954+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
33955+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
33956
33957 seq_printf(seq, "Serial number : ");
33958 print_serial_number(seq, (u8 *) (work32 + 16),
33959@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
33960 }
33961
33962 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
33963- seq_printf(seq, "Module name : %s\n",
33964- chtostr(result.module_name, 24));
33965- seq_printf(seq, "Module revision : %s\n",
33966- chtostr(result.module_rev, 8));
33967+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
33968+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
33969
33970 seq_printf(seq, "Serial number : ");
33971 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
33972@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
33973 return 0;
33974 }
33975
33976- seq_printf(seq, "Device name : %s\n",
33977- chtostr(result.device_name, 64));
33978- seq_printf(seq, "Service name : %s\n",
33979- chtostr(result.service_name, 64));
33980- seq_printf(seq, "Physical name : %s\n",
33981- chtostr(result.physical_location, 64));
33982- seq_printf(seq, "Instance number : %s\n",
33983- chtostr(result.instance_number, 4));
33984+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
33985+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
33986+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
33987+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
33988
33989 return 0;
33990 }
33991diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
33992index a8c08f3..155fe3d 100644
33993--- a/drivers/message/i2o/iop.c
33994+++ b/drivers/message/i2o/iop.c
33995@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
33996
33997 spin_lock_irqsave(&c->context_list_lock, flags);
33998
33999- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
34000- atomic_inc(&c->context_list_counter);
34001+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
34002+ atomic_inc_unchecked(&c->context_list_counter);
34003
34004- entry->context = atomic_read(&c->context_list_counter);
34005+ entry->context = atomic_read_unchecked(&c->context_list_counter);
34006
34007 list_add(&entry->list, &c->context_list);
34008
34009@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
34010
34011 #if BITS_PER_LONG == 64
34012 spin_lock_init(&c->context_list_lock);
34013- atomic_set(&c->context_list_counter, 0);
34014+ atomic_set_unchecked(&c->context_list_counter, 0);
34015 INIT_LIST_HEAD(&c->context_list);
34016 #endif
34017
34018diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
34019index 7ce65f4..e66e9bc 100644
34020--- a/drivers/mfd/abx500-core.c
34021+++ b/drivers/mfd/abx500-core.c
34022@@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
34023
34024 struct abx500_device_entry {
34025 struct list_head list;
34026- struct abx500_ops ops;
34027+ abx500_ops_no_const ops;
34028 struct device *dev;
34029 };
34030
34031diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
34032index a9223ed..4127b13 100644
34033--- a/drivers/mfd/janz-cmodio.c
34034+++ b/drivers/mfd/janz-cmodio.c
34035@@ -13,6 +13,7 @@
34036
34037 #include <linux/kernel.h>
34038 #include <linux/module.h>
34039+#include <linux/slab.h>
34040 #include <linux/init.h>
34041 #include <linux/pci.h>
34042 #include <linux/interrupt.h>
34043diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
34044index a981e2a..5ca0c8b 100644
34045--- a/drivers/misc/lis3lv02d/lis3lv02d.c
34046+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
34047@@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
34048 * the lid is closed. This leads to interrupts as soon as a little move
34049 * is done.
34050 */
34051- atomic_inc(&lis3->count);
34052+ atomic_inc_unchecked(&lis3->count);
34053
34054 wake_up_interruptible(&lis3->misc_wait);
34055 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
34056@@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
34057 if (lis3->pm_dev)
34058 pm_runtime_get_sync(lis3->pm_dev);
34059
34060- atomic_set(&lis3->count, 0);
34061+ atomic_set_unchecked(&lis3->count, 0);
34062 return 0;
34063 }
34064
34065@@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
34066 add_wait_queue(&lis3->misc_wait, &wait);
34067 while (true) {
34068 set_current_state(TASK_INTERRUPTIBLE);
34069- data = atomic_xchg(&lis3->count, 0);
34070+ data = atomic_xchg_unchecked(&lis3->count, 0);
34071 if (data)
34072 break;
34073
34074@@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34075 struct lis3lv02d, miscdev);
34076
34077 poll_wait(file, &lis3->misc_wait, wait);
34078- if (atomic_read(&lis3->count))
34079+ if (atomic_read_unchecked(&lis3->count))
34080 return POLLIN | POLLRDNORM;
34081 return 0;
34082 }
34083diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
34084index 2b1482a..5d33616 100644
34085--- a/drivers/misc/lis3lv02d/lis3lv02d.h
34086+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
34087@@ -266,7 +266,7 @@ struct lis3lv02d {
34088 struct input_polled_dev *idev; /* input device */
34089 struct platform_device *pdev; /* platform device */
34090 struct regulator_bulk_data regulators[2];
34091- atomic_t count; /* interrupt count after last read */
34092+ atomic_unchecked_t count; /* interrupt count after last read */
34093 union axis_conversion ac; /* hw -> logical axis */
34094 int mapped_btns[3];
34095
34096diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
34097index 2f30bad..c4c13d0 100644
34098--- a/drivers/misc/sgi-gru/gruhandles.c
34099+++ b/drivers/misc/sgi-gru/gruhandles.c
34100@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
34101 unsigned long nsec;
34102
34103 nsec = CLKS2NSEC(clks);
34104- atomic_long_inc(&mcs_op_statistics[op].count);
34105- atomic_long_add(nsec, &mcs_op_statistics[op].total);
34106+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
34107+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
34108 if (mcs_op_statistics[op].max < nsec)
34109 mcs_op_statistics[op].max = nsec;
34110 }
34111diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
34112index 950dbe9..eeef0f8 100644
34113--- a/drivers/misc/sgi-gru/gruprocfs.c
34114+++ b/drivers/misc/sgi-gru/gruprocfs.c
34115@@ -32,9 +32,9 @@
34116
34117 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
34118
34119-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
34120+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
34121 {
34122- unsigned long val = atomic_long_read(v);
34123+ unsigned long val = atomic_long_read_unchecked(v);
34124
34125 seq_printf(s, "%16lu %s\n", val, id);
34126 }
34127@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
34128
34129 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
34130 for (op = 0; op < mcsop_last; op++) {
34131- count = atomic_long_read(&mcs_op_statistics[op].count);
34132- total = atomic_long_read(&mcs_op_statistics[op].total);
34133+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
34134+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
34135 max = mcs_op_statistics[op].max;
34136 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
34137 count ? total / count : 0, max);
34138diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
34139index 5c3ce24..4915ccb 100644
34140--- a/drivers/misc/sgi-gru/grutables.h
34141+++ b/drivers/misc/sgi-gru/grutables.h
34142@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
34143 * GRU statistics.
34144 */
34145 struct gru_stats_s {
34146- atomic_long_t vdata_alloc;
34147- atomic_long_t vdata_free;
34148- atomic_long_t gts_alloc;
34149- atomic_long_t gts_free;
34150- atomic_long_t gms_alloc;
34151- atomic_long_t gms_free;
34152- atomic_long_t gts_double_allocate;
34153- atomic_long_t assign_context;
34154- atomic_long_t assign_context_failed;
34155- atomic_long_t free_context;
34156- atomic_long_t load_user_context;
34157- atomic_long_t load_kernel_context;
34158- atomic_long_t lock_kernel_context;
34159- atomic_long_t unlock_kernel_context;
34160- atomic_long_t steal_user_context;
34161- atomic_long_t steal_kernel_context;
34162- atomic_long_t steal_context_failed;
34163- atomic_long_t nopfn;
34164- atomic_long_t asid_new;
34165- atomic_long_t asid_next;
34166- atomic_long_t asid_wrap;
34167- atomic_long_t asid_reuse;
34168- atomic_long_t intr;
34169- atomic_long_t intr_cbr;
34170- atomic_long_t intr_tfh;
34171- atomic_long_t intr_spurious;
34172- atomic_long_t intr_mm_lock_failed;
34173- atomic_long_t call_os;
34174- atomic_long_t call_os_wait_queue;
34175- atomic_long_t user_flush_tlb;
34176- atomic_long_t user_unload_context;
34177- atomic_long_t user_exception;
34178- atomic_long_t set_context_option;
34179- atomic_long_t check_context_retarget_intr;
34180- atomic_long_t check_context_unload;
34181- atomic_long_t tlb_dropin;
34182- atomic_long_t tlb_preload_page;
34183- atomic_long_t tlb_dropin_fail_no_asid;
34184- atomic_long_t tlb_dropin_fail_upm;
34185- atomic_long_t tlb_dropin_fail_invalid;
34186- atomic_long_t tlb_dropin_fail_range_active;
34187- atomic_long_t tlb_dropin_fail_idle;
34188- atomic_long_t tlb_dropin_fail_fmm;
34189- atomic_long_t tlb_dropin_fail_no_exception;
34190- atomic_long_t tfh_stale_on_fault;
34191- atomic_long_t mmu_invalidate_range;
34192- atomic_long_t mmu_invalidate_page;
34193- atomic_long_t flush_tlb;
34194- atomic_long_t flush_tlb_gru;
34195- atomic_long_t flush_tlb_gru_tgh;
34196- atomic_long_t flush_tlb_gru_zero_asid;
34197+ atomic_long_unchecked_t vdata_alloc;
34198+ atomic_long_unchecked_t vdata_free;
34199+ atomic_long_unchecked_t gts_alloc;
34200+ atomic_long_unchecked_t gts_free;
34201+ atomic_long_unchecked_t gms_alloc;
34202+ atomic_long_unchecked_t gms_free;
34203+ atomic_long_unchecked_t gts_double_allocate;
34204+ atomic_long_unchecked_t assign_context;
34205+ atomic_long_unchecked_t assign_context_failed;
34206+ atomic_long_unchecked_t free_context;
34207+ atomic_long_unchecked_t load_user_context;
34208+ atomic_long_unchecked_t load_kernel_context;
34209+ atomic_long_unchecked_t lock_kernel_context;
34210+ atomic_long_unchecked_t unlock_kernel_context;
34211+ atomic_long_unchecked_t steal_user_context;
34212+ atomic_long_unchecked_t steal_kernel_context;
34213+ atomic_long_unchecked_t steal_context_failed;
34214+ atomic_long_unchecked_t nopfn;
34215+ atomic_long_unchecked_t asid_new;
34216+ atomic_long_unchecked_t asid_next;
34217+ atomic_long_unchecked_t asid_wrap;
34218+ atomic_long_unchecked_t asid_reuse;
34219+ atomic_long_unchecked_t intr;
34220+ atomic_long_unchecked_t intr_cbr;
34221+ atomic_long_unchecked_t intr_tfh;
34222+ atomic_long_unchecked_t intr_spurious;
34223+ atomic_long_unchecked_t intr_mm_lock_failed;
34224+ atomic_long_unchecked_t call_os;
34225+ atomic_long_unchecked_t call_os_wait_queue;
34226+ atomic_long_unchecked_t user_flush_tlb;
34227+ atomic_long_unchecked_t user_unload_context;
34228+ atomic_long_unchecked_t user_exception;
34229+ atomic_long_unchecked_t set_context_option;
34230+ atomic_long_unchecked_t check_context_retarget_intr;
34231+ atomic_long_unchecked_t check_context_unload;
34232+ atomic_long_unchecked_t tlb_dropin;
34233+ atomic_long_unchecked_t tlb_preload_page;
34234+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
34235+ atomic_long_unchecked_t tlb_dropin_fail_upm;
34236+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
34237+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
34238+ atomic_long_unchecked_t tlb_dropin_fail_idle;
34239+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
34240+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
34241+ atomic_long_unchecked_t tfh_stale_on_fault;
34242+ atomic_long_unchecked_t mmu_invalidate_range;
34243+ atomic_long_unchecked_t mmu_invalidate_page;
34244+ atomic_long_unchecked_t flush_tlb;
34245+ atomic_long_unchecked_t flush_tlb_gru;
34246+ atomic_long_unchecked_t flush_tlb_gru_tgh;
34247+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
34248
34249- atomic_long_t copy_gpa;
34250- atomic_long_t read_gpa;
34251+ atomic_long_unchecked_t copy_gpa;
34252+ atomic_long_unchecked_t read_gpa;
34253
34254- atomic_long_t mesq_receive;
34255- atomic_long_t mesq_receive_none;
34256- atomic_long_t mesq_send;
34257- atomic_long_t mesq_send_failed;
34258- atomic_long_t mesq_noop;
34259- atomic_long_t mesq_send_unexpected_error;
34260- atomic_long_t mesq_send_lb_overflow;
34261- atomic_long_t mesq_send_qlimit_reached;
34262- atomic_long_t mesq_send_amo_nacked;
34263- atomic_long_t mesq_send_put_nacked;
34264- atomic_long_t mesq_page_overflow;
34265- atomic_long_t mesq_qf_locked;
34266- atomic_long_t mesq_qf_noop_not_full;
34267- atomic_long_t mesq_qf_switch_head_failed;
34268- atomic_long_t mesq_qf_unexpected_error;
34269- atomic_long_t mesq_noop_unexpected_error;
34270- atomic_long_t mesq_noop_lb_overflow;
34271- atomic_long_t mesq_noop_qlimit_reached;
34272- atomic_long_t mesq_noop_amo_nacked;
34273- atomic_long_t mesq_noop_put_nacked;
34274- atomic_long_t mesq_noop_page_overflow;
34275+ atomic_long_unchecked_t mesq_receive;
34276+ atomic_long_unchecked_t mesq_receive_none;
34277+ atomic_long_unchecked_t mesq_send;
34278+ atomic_long_unchecked_t mesq_send_failed;
34279+ atomic_long_unchecked_t mesq_noop;
34280+ atomic_long_unchecked_t mesq_send_unexpected_error;
34281+ atomic_long_unchecked_t mesq_send_lb_overflow;
34282+ atomic_long_unchecked_t mesq_send_qlimit_reached;
34283+ atomic_long_unchecked_t mesq_send_amo_nacked;
34284+ atomic_long_unchecked_t mesq_send_put_nacked;
34285+ atomic_long_unchecked_t mesq_page_overflow;
34286+ atomic_long_unchecked_t mesq_qf_locked;
34287+ atomic_long_unchecked_t mesq_qf_noop_not_full;
34288+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
34289+ atomic_long_unchecked_t mesq_qf_unexpected_error;
34290+ atomic_long_unchecked_t mesq_noop_unexpected_error;
34291+ atomic_long_unchecked_t mesq_noop_lb_overflow;
34292+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
34293+ atomic_long_unchecked_t mesq_noop_amo_nacked;
34294+ atomic_long_unchecked_t mesq_noop_put_nacked;
34295+ atomic_long_unchecked_t mesq_noop_page_overflow;
34296
34297 };
34298
34299@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
34300 tghop_invalidate, mcsop_last};
34301
34302 struct mcs_op_statistic {
34303- atomic_long_t count;
34304- atomic_long_t total;
34305+ atomic_long_unchecked_t count;
34306+ atomic_long_unchecked_t total;
34307 unsigned long max;
34308 };
34309
34310@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
34311
34312 #define STAT(id) do { \
34313 if (gru_options & OPT_STATS) \
34314- atomic_long_inc(&gru_stats.id); \
34315+ atomic_long_inc_unchecked(&gru_stats.id); \
34316 } while (0)
34317
34318 #ifdef CONFIG_SGI_GRU_DEBUG
34319diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
34320index c862cd4..0d176fe 100644
34321--- a/drivers/misc/sgi-xp/xp.h
34322+++ b/drivers/misc/sgi-xp/xp.h
34323@@ -288,7 +288,7 @@ struct xpc_interface {
34324 xpc_notify_func, void *);
34325 void (*received) (short, int, void *);
34326 enum xp_retval (*partid_to_nasids) (short, void *);
34327-};
34328+} __no_const;
34329
34330 extern struct xpc_interface xpc_interface;
34331
34332diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
34333index b94d5f7..7f494c5 100644
34334--- a/drivers/misc/sgi-xp/xpc.h
34335+++ b/drivers/misc/sgi-xp/xpc.h
34336@@ -835,6 +835,7 @@ struct xpc_arch_operations {
34337 void (*received_payload) (struct xpc_channel *, void *);
34338 void (*notify_senders_of_disconnect) (struct xpc_channel *);
34339 };
34340+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
34341
34342 /* struct xpc_partition act_state values (for XPC HB) */
34343
34344@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
34345 /* found in xpc_main.c */
34346 extern struct device *xpc_part;
34347 extern struct device *xpc_chan;
34348-extern struct xpc_arch_operations xpc_arch_ops;
34349+extern xpc_arch_operations_no_const xpc_arch_ops;
34350 extern int xpc_disengage_timelimit;
34351 extern int xpc_disengage_timedout;
34352 extern int xpc_activate_IRQ_rcvd;
34353diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
34354index 8d082b4..aa749ae 100644
34355--- a/drivers/misc/sgi-xp/xpc_main.c
34356+++ b/drivers/misc/sgi-xp/xpc_main.c
34357@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
34358 .notifier_call = xpc_system_die,
34359 };
34360
34361-struct xpc_arch_operations xpc_arch_ops;
34362+xpc_arch_operations_no_const xpc_arch_ops;
34363
34364 /*
34365 * Timer function to enforce the timelimit on the partition disengage.
34366diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
34367index 69ef0be..f3ef91e 100644
34368--- a/drivers/mmc/host/sdhci-pci.c
34369+++ b/drivers/mmc/host/sdhci-pci.c
34370@@ -652,7 +652,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
34371 .probe = via_probe,
34372 };
34373
34374-static const struct pci_device_id pci_ids[] __devinitdata = {
34375+static const struct pci_device_id pci_ids[] __devinitconst = {
34376 {
34377 .vendor = PCI_VENDOR_ID_RICOH,
34378 .device = PCI_DEVICE_ID_RICOH_R5C822,
34379diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
34380index a4eb8b5..8c0628f 100644
34381--- a/drivers/mtd/devices/doc2000.c
34382+++ b/drivers/mtd/devices/doc2000.c
34383@@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
34384
34385 /* The ECC will not be calculated correctly if less than 512 is written */
34386 /* DBB-
34387- if (len != 0x200 && eccbuf)
34388+ if (len != 0x200)
34389 printk(KERN_WARNING
34390 "ECC needs a full sector write (adr: %lx size %lx)\n",
34391 (long) to, (long) len);
34392diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
34393index a9e57d6..c6d8731 100644
34394--- a/drivers/mtd/nand/denali.c
34395+++ b/drivers/mtd/nand/denali.c
34396@@ -26,6 +26,7 @@
34397 #include <linux/pci.h>
34398 #include <linux/mtd/mtd.h>
34399 #include <linux/module.h>
34400+#include <linux/slab.h>
34401
34402 #include "denali.h"
34403
34404diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
34405index 51b9d6a..52af9a7 100644
34406--- a/drivers/mtd/nftlmount.c
34407+++ b/drivers/mtd/nftlmount.c
34408@@ -24,6 +24,7 @@
34409 #include <asm/errno.h>
34410 #include <linux/delay.h>
34411 #include <linux/slab.h>
34412+#include <linux/sched.h>
34413 #include <linux/mtd/mtd.h>
34414 #include <linux/mtd/nand.h>
34415 #include <linux/mtd/nftl.h>
34416diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
34417index 6762dc4..9956862 100644
34418--- a/drivers/net/ethernet/atheros/atlx/atl2.c
34419+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
34420@@ -2859,7 +2859,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
34421 */
34422
34423 #define ATL2_PARAM(X, desc) \
34424- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34425+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34426 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
34427 MODULE_PARM_DESC(X, desc);
34428 #else
34429diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34430index 61a7670..7da6e34 100644
34431--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34432+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34433@@ -483,7 +483,7 @@ struct bnx2x_rx_mode_obj {
34434
34435 int (*wait_comp)(struct bnx2x *bp,
34436 struct bnx2x_rx_mode_ramrod_params *p);
34437-};
34438+} __no_const;
34439
34440 /********************** Set multicast group ***********************************/
34441
34442diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
34443index 93865f8..5448741 100644
34444--- a/drivers/net/ethernet/broadcom/tg3.h
34445+++ b/drivers/net/ethernet/broadcom/tg3.h
34446@@ -140,6 +140,7 @@
34447 #define CHIPREV_ID_5750_A0 0x4000
34448 #define CHIPREV_ID_5750_A1 0x4001
34449 #define CHIPREV_ID_5750_A3 0x4003
34450+#define CHIPREV_ID_5750_C1 0x4201
34451 #define CHIPREV_ID_5750_C2 0x4202
34452 #define CHIPREV_ID_5752_A0_HW 0x5000
34453 #define CHIPREV_ID_5752_A0 0x6000
34454diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34455index c4e8643..0979484 100644
34456--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34457+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34458@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
34459 */
34460 struct l2t_skb_cb {
34461 arp_failure_handler_func arp_failure_handler;
34462-};
34463+} __no_const;
34464
34465 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
34466
34467diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
34468index 18b106c..2b38d36 100644
34469--- a/drivers/net/ethernet/dec/tulip/de4x5.c
34470+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
34471@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34472 for (i=0; i<ETH_ALEN; i++) {
34473 tmp.addr[i] = dev->dev_addr[i];
34474 }
34475- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34476+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34477 break;
34478
34479 case DE4X5_SET_HWADDR: /* Set the hardware address */
34480@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34481 spin_lock_irqsave(&lp->lock, flags);
34482 memcpy(&statbuf, &lp->pktStats, ioc->len);
34483 spin_unlock_irqrestore(&lp->lock, flags);
34484- if (copy_to_user(ioc->data, &statbuf, ioc->len))
34485+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34486 return -EFAULT;
34487 break;
34488 }
34489diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
34490index ed7d1dc..d426748 100644
34491--- a/drivers/net/ethernet/dec/tulip/eeprom.c
34492+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
34493@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
34494 {NULL}};
34495
34496
34497-static const char *block_name[] __devinitdata = {
34498+static const char *block_name[] __devinitconst = {
34499 "21140 non-MII",
34500 "21140 MII PHY",
34501 "21142 Serial PHY",
34502diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
34503index 2ac6fff..2d127d0 100644
34504--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
34505+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
34506@@ -236,7 +236,7 @@ struct pci_id_info {
34507 int drv_flags; /* Driver use, intended as capability flags. */
34508 };
34509
34510-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34511+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34512 { /* Sometime a Level-One switch card. */
34513 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
34514 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
34515diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
34516index d783f4f..97fa1b0 100644
34517--- a/drivers/net/ethernet/dlink/sundance.c
34518+++ b/drivers/net/ethernet/dlink/sundance.c
34519@@ -218,7 +218,7 @@ enum {
34520 struct pci_id_info {
34521 const char *name;
34522 };
34523-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34524+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34525 {"D-Link DFE-550TX FAST Ethernet Adapter"},
34526 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
34527 {"D-Link DFE-580TX 4 port Server Adapter"},
34528diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
34529index 528a886..e6a98a3 100644
34530--- a/drivers/net/ethernet/emulex/benet/be_main.c
34531+++ b/drivers/net/ethernet/emulex/benet/be_main.c
34532@@ -403,7 +403,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
34533
34534 if (wrapped)
34535 newacc += 65536;
34536- ACCESS_ONCE(*acc) = newacc;
34537+ ACCESS_ONCE_RW(*acc) = newacc;
34538 }
34539
34540 void be_parse_stats(struct be_adapter *adapter)
34541diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
34542index 16b0704..d2c07d7 100644
34543--- a/drivers/net/ethernet/faraday/ftgmac100.c
34544+++ b/drivers/net/ethernet/faraday/ftgmac100.c
34545@@ -31,6 +31,8 @@
34546 #include <linux/netdevice.h>
34547 #include <linux/phy.h>
34548 #include <linux/platform_device.h>
34549+#include <linux/interrupt.h>
34550+#include <linux/irqreturn.h>
34551 #include <net/ip.h>
34552
34553 #include "ftgmac100.h"
34554diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
34555index 829b109..4ae5f6a 100644
34556--- a/drivers/net/ethernet/faraday/ftmac100.c
34557+++ b/drivers/net/ethernet/faraday/ftmac100.c
34558@@ -31,6 +31,8 @@
34559 #include <linux/module.h>
34560 #include <linux/netdevice.h>
34561 #include <linux/platform_device.h>
34562+#include <linux/interrupt.h>
34563+#include <linux/irqreturn.h>
34564
34565 #include "ftmac100.h"
34566
34567diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
34568index 1637b98..c42f87b 100644
34569--- a/drivers/net/ethernet/fealnx.c
34570+++ b/drivers/net/ethernet/fealnx.c
34571@@ -150,7 +150,7 @@ struct chip_info {
34572 int flags;
34573 };
34574
34575-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
34576+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
34577 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34578 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
34579 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34580diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
34581index b83897f..b2d970f 100644
34582--- a/drivers/net/ethernet/intel/e1000e/e1000.h
34583+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
34584@@ -181,7 +181,7 @@ struct e1000_info;
34585 #define E1000_TXDCTL_DMA_BURST_ENABLE \
34586 (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
34587 E1000_TXDCTL_COUNT_DESC | \
34588- (5 << 16) | /* wthresh must be +1 more than desired */\
34589+ (1 << 16) | /* wthresh must be +1 more than desired */\
34590 (1 << 8) | /* hthresh */ \
34591 0x1f) /* pthresh */
34592
34593diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
34594index f82ecf5..7d59ecb 100644
34595--- a/drivers/net/ethernet/intel/e1000e/hw.h
34596+++ b/drivers/net/ethernet/intel/e1000e/hw.h
34597@@ -784,6 +784,7 @@ struct e1000_mac_operations {
34598 void (*config_collision_dist)(struct e1000_hw *);
34599 s32 (*read_mac_addr)(struct e1000_hw *);
34600 };
34601+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34602
34603 /*
34604 * When to use various PHY register access functions:
34605@@ -824,6 +825,7 @@ struct e1000_phy_operations {
34606 void (*power_up)(struct e1000_hw *);
34607 void (*power_down)(struct e1000_hw *);
34608 };
34609+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34610
34611 /* Function pointers for the NVM. */
34612 struct e1000_nvm_operations {
34613@@ -836,9 +838,10 @@ struct e1000_nvm_operations {
34614 s32 (*validate)(struct e1000_hw *);
34615 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
34616 };
34617+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34618
34619 struct e1000_mac_info {
34620- struct e1000_mac_operations ops;
34621+ e1000_mac_operations_no_const ops;
34622 u8 addr[ETH_ALEN];
34623 u8 perm_addr[ETH_ALEN];
34624
34625@@ -879,7 +882,7 @@ struct e1000_mac_info {
34626 };
34627
34628 struct e1000_phy_info {
34629- struct e1000_phy_operations ops;
34630+ e1000_phy_operations_no_const ops;
34631
34632 enum e1000_phy_type type;
34633
34634@@ -913,7 +916,7 @@ struct e1000_phy_info {
34635 };
34636
34637 struct e1000_nvm_info {
34638- struct e1000_nvm_operations ops;
34639+ e1000_nvm_operations_no_const ops;
34640
34641 enum e1000_nvm_type type;
34642 enum e1000_nvm_override override;
34643diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
34644index f67cbd3..cef9e3d 100644
34645--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
34646+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
34647@@ -314,6 +314,7 @@ struct e1000_mac_operations {
34648 s32 (*read_mac_addr)(struct e1000_hw *);
34649 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
34650 };
34651+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34652
34653 struct e1000_phy_operations {
34654 s32 (*acquire)(struct e1000_hw *);
34655@@ -330,6 +331,7 @@ struct e1000_phy_operations {
34656 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
34657 s32 (*write_reg)(struct e1000_hw *, u32, u16);
34658 };
34659+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34660
34661 struct e1000_nvm_operations {
34662 s32 (*acquire)(struct e1000_hw *);
34663@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
34664 s32 (*update)(struct e1000_hw *);
34665 s32 (*validate)(struct e1000_hw *);
34666 };
34667+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34668
34669 struct e1000_info {
34670 s32 (*get_invariants)(struct e1000_hw *);
34671@@ -350,7 +353,7 @@ struct e1000_info {
34672 extern const struct e1000_info e1000_82575_info;
34673
34674 struct e1000_mac_info {
34675- struct e1000_mac_operations ops;
34676+ e1000_mac_operations_no_const ops;
34677
34678 u8 addr[6];
34679 u8 perm_addr[6];
34680@@ -388,7 +391,7 @@ struct e1000_mac_info {
34681 };
34682
34683 struct e1000_phy_info {
34684- struct e1000_phy_operations ops;
34685+ e1000_phy_operations_no_const ops;
34686
34687 enum e1000_phy_type type;
34688
34689@@ -423,7 +426,7 @@ struct e1000_phy_info {
34690 };
34691
34692 struct e1000_nvm_info {
34693- struct e1000_nvm_operations ops;
34694+ e1000_nvm_operations_no_const ops;
34695 enum e1000_nvm_type type;
34696 enum e1000_nvm_override override;
34697
34698@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
34699 s32 (*check_for_ack)(struct e1000_hw *, u16);
34700 s32 (*check_for_rst)(struct e1000_hw *, u16);
34701 };
34702+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34703
34704 struct e1000_mbx_stats {
34705 u32 msgs_tx;
34706@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
34707 };
34708
34709 struct e1000_mbx_info {
34710- struct e1000_mbx_operations ops;
34711+ e1000_mbx_operations_no_const ops;
34712 struct e1000_mbx_stats stats;
34713 u32 timeout;
34714 u32 usec_delay;
34715diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
34716index 57db3c6..aa825fc 100644
34717--- a/drivers/net/ethernet/intel/igbvf/vf.h
34718+++ b/drivers/net/ethernet/intel/igbvf/vf.h
34719@@ -189,9 +189,10 @@ struct e1000_mac_operations {
34720 s32 (*read_mac_addr)(struct e1000_hw *);
34721 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
34722 };
34723+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34724
34725 struct e1000_mac_info {
34726- struct e1000_mac_operations ops;
34727+ e1000_mac_operations_no_const ops;
34728 u8 addr[6];
34729 u8 perm_addr[6];
34730
34731@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
34732 s32 (*check_for_ack)(struct e1000_hw *);
34733 s32 (*check_for_rst)(struct e1000_hw *);
34734 };
34735+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34736
34737 struct e1000_mbx_stats {
34738 u32 msgs_tx;
34739@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
34740 };
34741
34742 struct e1000_mbx_info {
34743- struct e1000_mbx_operations ops;
34744+ e1000_mbx_operations_no_const ops;
34745 struct e1000_mbx_stats stats;
34746 u32 timeout;
34747 u32 usec_delay;
34748diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34749index 8636e83..ab9bbc3 100644
34750--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34751+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34752@@ -2710,6 +2710,7 @@ struct ixgbe_eeprom_operations {
34753 s32 (*update_checksum)(struct ixgbe_hw *);
34754 u16 (*calc_checksum)(struct ixgbe_hw *);
34755 };
34756+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
34757
34758 struct ixgbe_mac_operations {
34759 s32 (*init_hw)(struct ixgbe_hw *);
34760@@ -2773,6 +2774,7 @@ struct ixgbe_mac_operations {
34761 /* Manageability interface */
34762 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
34763 };
34764+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34765
34766 struct ixgbe_phy_operations {
34767 s32 (*identify)(struct ixgbe_hw *);
34768@@ -2792,9 +2794,10 @@ struct ixgbe_phy_operations {
34769 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
34770 s32 (*check_overtemp)(struct ixgbe_hw *);
34771 };
34772+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
34773
34774 struct ixgbe_eeprom_info {
34775- struct ixgbe_eeprom_operations ops;
34776+ ixgbe_eeprom_operations_no_const ops;
34777 enum ixgbe_eeprom_type type;
34778 u32 semaphore_delay;
34779 u16 word_size;
34780@@ -2804,7 +2807,7 @@ struct ixgbe_eeprom_info {
34781
34782 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
34783 struct ixgbe_mac_info {
34784- struct ixgbe_mac_operations ops;
34785+ ixgbe_mac_operations_no_const ops;
34786 enum ixgbe_mac_type type;
34787 u8 addr[ETH_ALEN];
34788 u8 perm_addr[ETH_ALEN];
34789@@ -2832,7 +2835,7 @@ struct ixgbe_mac_info {
34790 };
34791
34792 struct ixgbe_phy_info {
34793- struct ixgbe_phy_operations ops;
34794+ ixgbe_phy_operations_no_const ops;
34795 struct mdio_if_info mdio;
34796 enum ixgbe_phy_type type;
34797 u32 id;
34798@@ -2860,6 +2863,7 @@ struct ixgbe_mbx_operations {
34799 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
34800 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
34801 };
34802+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
34803
34804 struct ixgbe_mbx_stats {
34805 u32 msgs_tx;
34806@@ -2871,7 +2875,7 @@ struct ixgbe_mbx_stats {
34807 };
34808
34809 struct ixgbe_mbx_info {
34810- struct ixgbe_mbx_operations ops;
34811+ ixgbe_mbx_operations_no_const ops;
34812 struct ixgbe_mbx_stats stats;
34813 u32 timeout;
34814 u32 usec_delay;
34815diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
34816index 25c951d..cc7cf33 100644
34817--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
34818+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
34819@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
34820 s32 (*clear_vfta)(struct ixgbe_hw *);
34821 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
34822 };
34823+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34824
34825 enum ixgbe_mac_type {
34826 ixgbe_mac_unknown = 0,
34827@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
34828 };
34829
34830 struct ixgbe_mac_info {
34831- struct ixgbe_mac_operations ops;
34832+ ixgbe_mac_operations_no_const ops;
34833 u8 addr[6];
34834 u8 perm_addr[6];
34835
34836@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
34837 s32 (*check_for_ack)(struct ixgbe_hw *);
34838 s32 (*check_for_rst)(struct ixgbe_hw *);
34839 };
34840+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
34841
34842 struct ixgbe_mbx_stats {
34843 u32 msgs_tx;
34844@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
34845 };
34846
34847 struct ixgbe_mbx_info {
34848- struct ixgbe_mbx_operations ops;
34849+ ixgbe_mbx_operations_no_const ops;
34850 struct ixgbe_mbx_stats stats;
34851 u32 timeout;
34852 u32 udelay;
34853diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
34854index 8bb05b4..074796f 100644
34855--- a/drivers/net/ethernet/mellanox/mlx4/main.c
34856+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
34857@@ -41,6 +41,7 @@
34858 #include <linux/slab.h>
34859 #include <linux/io-mapping.h>
34860 #include <linux/delay.h>
34861+#include <linux/sched.h>
34862
34863 #include <linux/mlx4/device.h>
34864 #include <linux/mlx4/doorbell.h>
34865diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
34866index 5046a64..71ca936 100644
34867--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
34868+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
34869@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
34870 void (*link_down)(struct __vxge_hw_device *devh);
34871 void (*crit_err)(struct __vxge_hw_device *devh,
34872 enum vxge_hw_event type, u64 ext_data);
34873-};
34874+} __no_const;
34875
34876 /*
34877 * struct __vxge_hw_blockpool_entry - Block private data structure
34878diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34879index 4a518a3..936b334 100644
34880--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34881+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34882@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
34883 struct vxge_hw_mempool_dma *dma_object,
34884 u32 index,
34885 u32 is_last);
34886-};
34887+} __no_const;
34888
34889 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
34890 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
34891diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
34892index ce6b44d..74f10c2 100644
34893--- a/drivers/net/ethernet/realtek/r8169.c
34894+++ b/drivers/net/ethernet/realtek/r8169.c
34895@@ -708,17 +708,17 @@ struct rtl8169_private {
34896 struct mdio_ops {
34897 void (*write)(void __iomem *, int, int);
34898 int (*read)(void __iomem *, int);
34899- } mdio_ops;
34900+ } __no_const mdio_ops;
34901
34902 struct pll_power_ops {
34903 void (*down)(struct rtl8169_private *);
34904 void (*up)(struct rtl8169_private *);
34905- } pll_power_ops;
34906+ } __no_const pll_power_ops;
34907
34908 struct jumbo_ops {
34909 void (*enable)(struct rtl8169_private *);
34910 void (*disable)(struct rtl8169_private *);
34911- } jumbo_ops;
34912+ } __no_const jumbo_ops;
34913
34914 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
34915 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
34916diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
34917index a9deda8..5507c31 100644
34918--- a/drivers/net/ethernet/sis/sis190.c
34919+++ b/drivers/net/ethernet/sis/sis190.c
34920@@ -1620,7 +1620,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
34921 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
34922 struct net_device *dev)
34923 {
34924- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
34925+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
34926 struct sis190_private *tp = netdev_priv(dev);
34927 struct pci_dev *isa_bridge;
34928 u8 reg, tmp8;
34929diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34930index c07cfe9..81cbf7e 100644
34931--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34932+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34933@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
34934
34935 writel(value, ioaddr + MMC_CNTRL);
34936
34937- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
34938- MMC_CNTRL, value);
34939+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
34940+// MMC_CNTRL, value);
34941 }
34942
34943 /* To mask all all interrupts.*/
34944diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
34945index 48d56da..a27e46c 100644
34946--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
34947+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
34948@@ -1584,7 +1584,7 @@ static const struct file_operations stmmac_rings_status_fops = {
34949 .open = stmmac_sysfs_ring_open,
34950 .read = seq_read,
34951 .llseek = seq_lseek,
34952- .release = seq_release,
34953+ .release = single_release,
34954 };
34955
34956 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
34957@@ -1656,7 +1656,7 @@ static const struct file_operations stmmac_dma_cap_fops = {
34958 .open = stmmac_sysfs_dma_cap_open,
34959 .read = seq_read,
34960 .llseek = seq_lseek,
34961- .release = seq_release,
34962+ .release = single_release,
34963 };
34964
34965 static int stmmac_init_fs(struct net_device *dev)
34966diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
34967index c358245..8c1de63 100644
34968--- a/drivers/net/hyperv/hyperv_net.h
34969+++ b/drivers/net/hyperv/hyperv_net.h
34970@@ -98,7 +98,7 @@ struct rndis_device {
34971
34972 enum rndis_device_state state;
34973 bool link_state;
34974- atomic_t new_req_id;
34975+ atomic_unchecked_t new_req_id;
34976
34977 spinlock_t request_lock;
34978 struct list_head req_list;
34979diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
34980index d6be64b..5d97e3b 100644
34981--- a/drivers/net/hyperv/rndis_filter.c
34982+++ b/drivers/net/hyperv/rndis_filter.c
34983@@ -97,7 +97,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
34984 * template
34985 */
34986 set = &rndis_msg->msg.set_req;
34987- set->req_id = atomic_inc_return(&dev->new_req_id);
34988+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34989
34990 /* Add to the request list */
34991 spin_lock_irqsave(&dev->request_lock, flags);
34992@@ -648,7 +648,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
34993
34994 /* Setup the rndis set */
34995 halt = &request->request_msg.msg.halt_req;
34996- halt->req_id = atomic_inc_return(&dev->new_req_id);
34997+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34998
34999 /* Ignore return since this msg is optional. */
35000 rndis_filter_send_request(dev, request);
35001diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
35002index cb8fd50..003ec38 100644
35003--- a/drivers/net/macvtap.c
35004+++ b/drivers/net/macvtap.c
35005@@ -528,6 +528,8 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
35006 }
35007 base = (unsigned long)from->iov_base + offset1;
35008 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
35009+ if (i + size >= MAX_SKB_FRAGS)
35010+ return -EFAULT;
35011 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
35012 if ((num_pages != size) ||
35013 (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
35014diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
35015index 21d7151..8034208 100644
35016--- a/drivers/net/ppp/ppp_generic.c
35017+++ b/drivers/net/ppp/ppp_generic.c
35018@@ -986,7 +986,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35019 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
35020 struct ppp_stats stats;
35021 struct ppp_comp_stats cstats;
35022- char *vers;
35023
35024 switch (cmd) {
35025 case SIOCGPPPSTATS:
35026@@ -1008,8 +1007,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35027 break;
35028
35029 case SIOCGPPPVER:
35030- vers = PPP_VERSION;
35031- if (copy_to_user(addr, vers, strlen(vers) + 1))
35032+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
35033 break;
35034 err = 0;
35035 break;
35036diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
35037index b715e6b..6d2490f 100644
35038--- a/drivers/net/tokenring/abyss.c
35039+++ b/drivers/net/tokenring/abyss.c
35040@@ -450,10 +450,12 @@ static struct pci_driver abyss_driver = {
35041
35042 static int __init abyss_init (void)
35043 {
35044- abyss_netdev_ops = tms380tr_netdev_ops;
35045+ pax_open_kernel();
35046+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35047
35048- abyss_netdev_ops.ndo_open = abyss_open;
35049- abyss_netdev_ops.ndo_stop = abyss_close;
35050+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
35051+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
35052+ pax_close_kernel();
35053
35054 return pci_register_driver(&abyss_driver);
35055 }
35056diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
35057index 28adcdf..ae82f35 100644
35058--- a/drivers/net/tokenring/madgemc.c
35059+++ b/drivers/net/tokenring/madgemc.c
35060@@ -742,9 +742,11 @@ static struct mca_driver madgemc_driver = {
35061
35062 static int __init madgemc_init (void)
35063 {
35064- madgemc_netdev_ops = tms380tr_netdev_ops;
35065- madgemc_netdev_ops.ndo_open = madgemc_open;
35066- madgemc_netdev_ops.ndo_stop = madgemc_close;
35067+ pax_open_kernel();
35068+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35069+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
35070+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
35071+ pax_close_kernel();
35072
35073 return mca_register_driver (&madgemc_driver);
35074 }
35075diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
35076index 62d90e4..9d84237 100644
35077--- a/drivers/net/tokenring/proteon.c
35078+++ b/drivers/net/tokenring/proteon.c
35079@@ -352,9 +352,11 @@ static int __init proteon_init(void)
35080 struct platform_device *pdev;
35081 int i, num = 0, err = 0;
35082
35083- proteon_netdev_ops = tms380tr_netdev_ops;
35084- proteon_netdev_ops.ndo_open = proteon_open;
35085- proteon_netdev_ops.ndo_stop = tms380tr_close;
35086+ pax_open_kernel();
35087+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35088+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
35089+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
35090+ pax_close_kernel();
35091
35092 err = platform_driver_register(&proteon_driver);
35093 if (err)
35094diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
35095index ee11e93..c8f19c7 100644
35096--- a/drivers/net/tokenring/skisa.c
35097+++ b/drivers/net/tokenring/skisa.c
35098@@ -362,9 +362,11 @@ static int __init sk_isa_init(void)
35099 struct platform_device *pdev;
35100 int i, num = 0, err = 0;
35101
35102- sk_isa_netdev_ops = tms380tr_netdev_ops;
35103- sk_isa_netdev_ops.ndo_open = sk_isa_open;
35104- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35105+ pax_open_kernel();
35106+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35107+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
35108+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35109+ pax_close_kernel();
35110
35111 err = platform_driver_register(&sk_isa_driver);
35112 if (err)
35113diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
35114index 2d2a688..35f2372 100644
35115--- a/drivers/net/usb/hso.c
35116+++ b/drivers/net/usb/hso.c
35117@@ -71,7 +71,7 @@
35118 #include <asm/byteorder.h>
35119 #include <linux/serial_core.h>
35120 #include <linux/serial.h>
35121-
35122+#include <asm/local.h>
35123
35124 #define MOD_AUTHOR "Option Wireless"
35125 #define MOD_DESCRIPTION "USB High Speed Option driver"
35126@@ -257,7 +257,7 @@ struct hso_serial {
35127
35128 /* from usb_serial_port */
35129 struct tty_struct *tty;
35130- int open_count;
35131+ local_t open_count;
35132 spinlock_t serial_lock;
35133
35134 int (*write_data) (struct hso_serial *serial);
35135@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
35136 struct urb *urb;
35137
35138 urb = serial->rx_urb[0];
35139- if (serial->open_count > 0) {
35140+ if (local_read(&serial->open_count) > 0) {
35141 count = put_rxbuf_data(urb, serial);
35142 if (count == -1)
35143 return;
35144@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
35145 DUMP1(urb->transfer_buffer, urb->actual_length);
35146
35147 /* Anyone listening? */
35148- if (serial->open_count == 0)
35149+ if (local_read(&serial->open_count) == 0)
35150 return;
35151
35152 if (status == 0) {
35153@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35154 spin_unlock_irq(&serial->serial_lock);
35155
35156 /* check for port already opened, if not set the termios */
35157- serial->open_count++;
35158- if (serial->open_count == 1) {
35159+ if (local_inc_return(&serial->open_count) == 1) {
35160 serial->rx_state = RX_IDLE;
35161 /* Force default termio settings */
35162 _hso_serial_set_termios(tty, NULL);
35163@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35164 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35165 if (result) {
35166 hso_stop_serial_device(serial->parent);
35167- serial->open_count--;
35168+ local_dec(&serial->open_count);
35169 kref_put(&serial->parent->ref, hso_serial_ref_free);
35170 }
35171 } else {
35172@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
35173
35174 /* reset the rts and dtr */
35175 /* do the actual close */
35176- serial->open_count--;
35177+ local_dec(&serial->open_count);
35178
35179- if (serial->open_count <= 0) {
35180- serial->open_count = 0;
35181+ if (local_read(&serial->open_count) <= 0) {
35182+ local_set(&serial->open_count, 0);
35183 spin_lock_irq(&serial->serial_lock);
35184 if (serial->tty == tty) {
35185 serial->tty->driver_data = NULL;
35186@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
35187
35188 /* the actual setup */
35189 spin_lock_irqsave(&serial->serial_lock, flags);
35190- if (serial->open_count)
35191+ if (local_read(&serial->open_count))
35192 _hso_serial_set_termios(tty, old);
35193 else
35194 tty->termios = old;
35195@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
35196 D1("Pending read interrupt on port %d\n", i);
35197 spin_lock(&serial->serial_lock);
35198 if (serial->rx_state == RX_IDLE &&
35199- serial->open_count > 0) {
35200+ local_read(&serial->open_count) > 0) {
35201 /* Setup and send a ctrl req read on
35202 * port i */
35203 if (!serial->rx_urb_filled[0]) {
35204@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
35205 /* Start all serial ports */
35206 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35207 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35208- if (dev2ser(serial_table[i])->open_count) {
35209+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
35210 result =
35211 hso_start_serial_device(serial_table[i], GFP_NOIO);
35212 hso_kick_transmit(dev2ser(serial_table[i]));
35213diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
35214index c54b7d37..af1f359 100644
35215--- a/drivers/net/wireless/ath/ath.h
35216+++ b/drivers/net/wireless/ath/ath.h
35217@@ -119,6 +119,7 @@ struct ath_ops {
35218 void (*write_flush) (void *);
35219 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
35220 };
35221+typedef struct ath_ops __no_const ath_ops_no_const;
35222
35223 struct ath_common;
35224 struct ath_bus_ops;
35225diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35226index aa2abaf..5f5152d 100644
35227--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35228+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35229@@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35230 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
35231 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
35232
35233- ACCESS_ONCE(ads->ds_link) = i->link;
35234- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
35235+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
35236+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
35237
35238 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
35239 ctl6 = SM(i->keytype, AR_EncrType);
35240@@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35241
35242 if ((i->is_first || i->is_last) &&
35243 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
35244- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
35245+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
35246 | set11nTries(i->rates, 1)
35247 | set11nTries(i->rates, 2)
35248 | set11nTries(i->rates, 3)
35249 | (i->dur_update ? AR_DurUpdateEna : 0)
35250 | SM(0, AR_BurstDur);
35251
35252- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
35253+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
35254 | set11nRate(i->rates, 1)
35255 | set11nRate(i->rates, 2)
35256 | set11nRate(i->rates, 3);
35257 } else {
35258- ACCESS_ONCE(ads->ds_ctl2) = 0;
35259- ACCESS_ONCE(ads->ds_ctl3) = 0;
35260+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
35261+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
35262 }
35263
35264 if (!i->is_first) {
35265- ACCESS_ONCE(ads->ds_ctl0) = 0;
35266- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35267- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35268+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
35269+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35270+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35271 return;
35272 }
35273
35274@@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35275 break;
35276 }
35277
35278- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35279+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35280 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35281 | SM(i->txpower, AR_XmitPower)
35282 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35283@@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35284 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
35285 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
35286
35287- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35288- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35289+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35290+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35291
35292 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
35293 return;
35294
35295- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35296+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35297 | set11nPktDurRTSCTS(i->rates, 1);
35298
35299- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35300+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35301 | set11nPktDurRTSCTS(i->rates, 3);
35302
35303- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35304+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35305 | set11nRateFlags(i->rates, 1)
35306 | set11nRateFlags(i->rates, 2)
35307 | set11nRateFlags(i->rates, 3)
35308diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35309index a66a13b..0ef399e 100644
35310--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35311+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35312@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35313 (i->qcu << AR_TxQcuNum_S) | desc_len;
35314
35315 checksum += val;
35316- ACCESS_ONCE(ads->info) = val;
35317+ ACCESS_ONCE_RW(ads->info) = val;
35318
35319 checksum += i->link;
35320- ACCESS_ONCE(ads->link) = i->link;
35321+ ACCESS_ONCE_RW(ads->link) = i->link;
35322
35323 checksum += i->buf_addr[0];
35324- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
35325+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
35326 checksum += i->buf_addr[1];
35327- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
35328+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
35329 checksum += i->buf_addr[2];
35330- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
35331+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
35332 checksum += i->buf_addr[3];
35333- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
35334+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
35335
35336 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
35337- ACCESS_ONCE(ads->ctl3) = val;
35338+ ACCESS_ONCE_RW(ads->ctl3) = val;
35339 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
35340- ACCESS_ONCE(ads->ctl5) = val;
35341+ ACCESS_ONCE_RW(ads->ctl5) = val;
35342 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
35343- ACCESS_ONCE(ads->ctl7) = val;
35344+ ACCESS_ONCE_RW(ads->ctl7) = val;
35345 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
35346- ACCESS_ONCE(ads->ctl9) = val;
35347+ ACCESS_ONCE_RW(ads->ctl9) = val;
35348
35349 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
35350- ACCESS_ONCE(ads->ctl10) = checksum;
35351+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
35352
35353 if (i->is_first || i->is_last) {
35354- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
35355+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
35356 | set11nTries(i->rates, 1)
35357 | set11nTries(i->rates, 2)
35358 | set11nTries(i->rates, 3)
35359 | (i->dur_update ? AR_DurUpdateEna : 0)
35360 | SM(0, AR_BurstDur);
35361
35362- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
35363+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
35364 | set11nRate(i->rates, 1)
35365 | set11nRate(i->rates, 2)
35366 | set11nRate(i->rates, 3);
35367 } else {
35368- ACCESS_ONCE(ads->ctl13) = 0;
35369- ACCESS_ONCE(ads->ctl14) = 0;
35370+ ACCESS_ONCE_RW(ads->ctl13) = 0;
35371+ ACCESS_ONCE_RW(ads->ctl14) = 0;
35372 }
35373
35374 ads->ctl20 = 0;
35375@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35376
35377 ctl17 = SM(i->keytype, AR_EncrType);
35378 if (!i->is_first) {
35379- ACCESS_ONCE(ads->ctl11) = 0;
35380- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35381- ACCESS_ONCE(ads->ctl15) = 0;
35382- ACCESS_ONCE(ads->ctl16) = 0;
35383- ACCESS_ONCE(ads->ctl17) = ctl17;
35384- ACCESS_ONCE(ads->ctl18) = 0;
35385- ACCESS_ONCE(ads->ctl19) = 0;
35386+ ACCESS_ONCE_RW(ads->ctl11) = 0;
35387+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35388+ ACCESS_ONCE_RW(ads->ctl15) = 0;
35389+ ACCESS_ONCE_RW(ads->ctl16) = 0;
35390+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35391+ ACCESS_ONCE_RW(ads->ctl18) = 0;
35392+ ACCESS_ONCE_RW(ads->ctl19) = 0;
35393 return;
35394 }
35395
35396- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35397+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35398 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35399 | SM(i->txpower, AR_XmitPower)
35400 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35401@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35402 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
35403 ctl12 |= SM(val, AR_PAPRDChainMask);
35404
35405- ACCESS_ONCE(ads->ctl12) = ctl12;
35406- ACCESS_ONCE(ads->ctl17) = ctl17;
35407+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
35408+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35409
35410- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35411+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35412 | set11nPktDurRTSCTS(i->rates, 1);
35413
35414- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35415+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35416 | set11nPktDurRTSCTS(i->rates, 3);
35417
35418- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
35419+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
35420 | set11nRateFlags(i->rates, 1)
35421 | set11nRateFlags(i->rates, 2)
35422 | set11nRateFlags(i->rates, 3)
35423 | SM(i->rtscts_rate, AR_RTSCTSRate);
35424
35425- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
35426+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
35427 }
35428
35429 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
35430diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
35431index e88f182..4e57f5d 100644
35432--- a/drivers/net/wireless/ath/ath9k/hw.h
35433+++ b/drivers/net/wireless/ath/ath9k/hw.h
35434@@ -614,7 +614,7 @@ struct ath_hw_private_ops {
35435
35436 /* ANI */
35437 void (*ani_cache_ini_regs)(struct ath_hw *ah);
35438-};
35439+} __no_const;
35440
35441 /**
35442 * struct ath_hw_ops - callbacks used by hardware code and driver code
35443@@ -644,7 +644,7 @@ struct ath_hw_ops {
35444 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
35445 struct ath_hw_antcomb_conf *antconf);
35446
35447-};
35448+} __no_const;
35449
35450 struct ath_nf_limits {
35451 s16 max;
35452@@ -664,7 +664,7 @@ enum ath_cal_list {
35453 #define AH_FASTCC 0x4
35454
35455 struct ath_hw {
35456- struct ath_ops reg_ops;
35457+ ath_ops_no_const reg_ops;
35458
35459 struct ieee80211_hw *hw;
35460 struct ath_common common;
35461diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35462index af00e2c..ab04d34 100644
35463--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35464+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35465@@ -545,7 +545,7 @@ struct phy_func_ptr {
35466 void (*carrsuppr)(struct brcms_phy *);
35467 s32 (*rxsigpwr)(struct brcms_phy *, s32);
35468 void (*detach)(struct brcms_phy *);
35469-};
35470+} __no_const;
35471
35472 struct brcms_phy {
35473 struct brcms_phy_pub pubpi_ro;
35474diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
35475index faec404..a5277f1 100644
35476--- a/drivers/net/wireless/iwlegacy/3945-mac.c
35477+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
35478@@ -3611,7 +3611,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
35479 */
35480 if (il3945_mod_params.disable_hw_scan) {
35481 D_INFO("Disabling hw_scan\n");
35482- il3945_mac_ops.hw_scan = NULL;
35483+ pax_open_kernel();
35484+ *(void **)&il3945_mac_ops.hw_scan = NULL;
35485+ pax_close_kernel();
35486 }
35487
35488 D_INFO("*** LOAD DRIVER ***\n");
35489diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
35490index b7ce6a6..5649756 100644
35491--- a/drivers/net/wireless/mac80211_hwsim.c
35492+++ b/drivers/net/wireless/mac80211_hwsim.c
35493@@ -1721,9 +1721,11 @@ static int __init init_mac80211_hwsim(void)
35494 return -EINVAL;
35495
35496 if (fake_hw_scan) {
35497- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35498- mac80211_hwsim_ops.sw_scan_start = NULL;
35499- mac80211_hwsim_ops.sw_scan_complete = NULL;
35500+ pax_open_kernel();
35501+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35502+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
35503+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
35504+ pax_close_kernel();
35505 }
35506
35507 spin_lock_init(&hwsim_radio_lock);
35508diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
35509index 35225e9..95e6bf9 100644
35510--- a/drivers/net/wireless/mwifiex/main.h
35511+++ b/drivers/net/wireless/mwifiex/main.h
35512@@ -537,7 +537,7 @@ struct mwifiex_if_ops {
35513 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
35514 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
35515 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
35516-};
35517+} __no_const;
35518
35519 struct mwifiex_adapter {
35520 u8 iface_type;
35521diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
35522index d66e298..55b0a89 100644
35523--- a/drivers/net/wireless/rndis_wlan.c
35524+++ b/drivers/net/wireless/rndis_wlan.c
35525@@ -1278,7 +1278,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
35526
35527 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
35528
35529- if (rts_threshold < 0 || rts_threshold > 2347)
35530+ if (rts_threshold > 2347)
35531 rts_threshold = 2347;
35532
35533 tmp = cpu_to_le32(rts_threshold);
35534diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
35535index c264dfa..08ee30e 100644
35536--- a/drivers/net/wireless/rt2x00/rt2x00.h
35537+++ b/drivers/net/wireless/rt2x00/rt2x00.h
35538@@ -396,7 +396,7 @@ struct rt2x00_intf {
35539 * for hardware which doesn't support hardware
35540 * sequence counting.
35541 */
35542- atomic_t seqno;
35543+ atomic_unchecked_t seqno;
35544 };
35545
35546 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
35547diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
35548index 50f92d5..f3afc41 100644
35549--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
35550+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
35551@@ -229,9 +229,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
35552 * sequence counter given by mac80211.
35553 */
35554 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
35555- seqno = atomic_add_return(0x10, &intf->seqno);
35556+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
35557 else
35558- seqno = atomic_read(&intf->seqno);
35559+ seqno = atomic_read_unchecked(&intf->seqno);
35560
35561 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
35562 hdr->seq_ctrl |= cpu_to_le16(seqno);
35563diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
35564index 9d8f581..0f6589e 100644
35565--- a/drivers/net/wireless/wl1251/wl1251.h
35566+++ b/drivers/net/wireless/wl1251/wl1251.h
35567@@ -266,7 +266,7 @@ struct wl1251_if_operations {
35568 void (*reset)(struct wl1251 *wl);
35569 void (*enable_irq)(struct wl1251 *wl);
35570 void (*disable_irq)(struct wl1251 *wl);
35571-};
35572+} __no_const;
35573
35574 struct wl1251 {
35575 struct ieee80211_hw *hw;
35576diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
35577index f34b5b2..b5abb9f 100644
35578--- a/drivers/oprofile/buffer_sync.c
35579+++ b/drivers/oprofile/buffer_sync.c
35580@@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
35581 if (cookie == NO_COOKIE)
35582 offset = pc;
35583 if (cookie == INVALID_COOKIE) {
35584- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35585+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35586 offset = pc;
35587 }
35588 if (cookie != last_cookie) {
35589@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
35590 /* add userspace sample */
35591
35592 if (!mm) {
35593- atomic_inc(&oprofile_stats.sample_lost_no_mm);
35594+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35595 return 0;
35596 }
35597
35598 cookie = lookup_dcookie(mm, s->eip, &offset);
35599
35600 if (cookie == INVALID_COOKIE) {
35601- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35602+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35603 return 0;
35604 }
35605
35606@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
35607 /* ignore backtraces if failed to add a sample */
35608 if (state == sb_bt_start) {
35609 state = sb_bt_ignore;
35610- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35611+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35612 }
35613 }
35614 release_mm(mm);
35615diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
35616index c0cc4e7..44d4e54 100644
35617--- a/drivers/oprofile/event_buffer.c
35618+++ b/drivers/oprofile/event_buffer.c
35619@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
35620 }
35621
35622 if (buffer_pos == buffer_size) {
35623- atomic_inc(&oprofile_stats.event_lost_overflow);
35624+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35625 return;
35626 }
35627
35628diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
35629index ed2c3ec..deda85a 100644
35630--- a/drivers/oprofile/oprof.c
35631+++ b/drivers/oprofile/oprof.c
35632@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
35633 if (oprofile_ops.switch_events())
35634 return;
35635
35636- atomic_inc(&oprofile_stats.multiplex_counter);
35637+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35638 start_switch_worker();
35639 }
35640
35641diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
35642index 917d28e..d62d981 100644
35643--- a/drivers/oprofile/oprofile_stats.c
35644+++ b/drivers/oprofile/oprofile_stats.c
35645@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35646 cpu_buf->sample_invalid_eip = 0;
35647 }
35648
35649- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35650- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35651- atomic_set(&oprofile_stats.event_lost_overflow, 0);
35652- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35653- atomic_set(&oprofile_stats.multiplex_counter, 0);
35654+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35655+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35656+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35657+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35658+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35659 }
35660
35661
35662diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
35663index 38b6fc0..b5cbfce 100644
35664--- a/drivers/oprofile/oprofile_stats.h
35665+++ b/drivers/oprofile/oprofile_stats.h
35666@@ -13,11 +13,11 @@
35667 #include <linux/atomic.h>
35668
35669 struct oprofile_stat_struct {
35670- atomic_t sample_lost_no_mm;
35671- atomic_t sample_lost_no_mapping;
35672- atomic_t bt_lost_no_mapping;
35673- atomic_t event_lost_overflow;
35674- atomic_t multiplex_counter;
35675+ atomic_unchecked_t sample_lost_no_mm;
35676+ atomic_unchecked_t sample_lost_no_mapping;
35677+ atomic_unchecked_t bt_lost_no_mapping;
35678+ atomic_unchecked_t event_lost_overflow;
35679+ atomic_unchecked_t multiplex_counter;
35680 };
35681
35682 extern struct oprofile_stat_struct oprofile_stats;
35683diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
35684index 849357c..b83c1e0 100644
35685--- a/drivers/oprofile/oprofilefs.c
35686+++ b/drivers/oprofile/oprofilefs.c
35687@@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
35688
35689
35690 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35691- char const *name, atomic_t *val)
35692+ char const *name, atomic_unchecked_t *val)
35693 {
35694 return __oprofilefs_create_file(sb, root, name,
35695 &atomic_ro_fops, 0444, val);
35696diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
35697index 3f56bc0..707d642 100644
35698--- a/drivers/parport/procfs.c
35699+++ b/drivers/parport/procfs.c
35700@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
35701
35702 *ppos += len;
35703
35704- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35705+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35706 }
35707
35708 #ifdef CONFIG_PARPORT_1284
35709@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
35710
35711 *ppos += len;
35712
35713- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35714+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35715 }
35716 #endif /* IEEE1284.3 support. */
35717
35718diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
35719index 9fff878..ad0ad53 100644
35720--- a/drivers/pci/hotplug/cpci_hotplug.h
35721+++ b/drivers/pci/hotplug/cpci_hotplug.h
35722@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35723 int (*hardware_test) (struct slot* slot, u32 value);
35724 u8 (*get_power) (struct slot* slot);
35725 int (*set_power) (struct slot* slot, int value);
35726-};
35727+} __no_const;
35728
35729 struct cpci_hp_controller {
35730 unsigned int irq;
35731diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
35732index 76ba8a1..20ca857 100644
35733--- a/drivers/pci/hotplug/cpqphp_nvram.c
35734+++ b/drivers/pci/hotplug/cpqphp_nvram.c
35735@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
35736
35737 void compaq_nvram_init (void __iomem *rom_start)
35738 {
35739+
35740+#ifndef CONFIG_PAX_KERNEXEC
35741 if (rom_start) {
35742 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35743 }
35744+#endif
35745+
35746 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35747
35748 /* initialize our int15 lock */
35749diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
35750index b500840..d7159d3 100644
35751--- a/drivers/pci/pcie/aspm.c
35752+++ b/drivers/pci/pcie/aspm.c
35753@@ -27,9 +27,9 @@
35754 #define MODULE_PARAM_PREFIX "pcie_aspm."
35755
35756 /* Note: those are not register definitions */
35757-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35758-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35759-#define ASPM_STATE_L1 (4) /* L1 state */
35760+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35761+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35762+#define ASPM_STATE_L1 (4U) /* L1 state */
35763 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35764 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35765
35766diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
35767index 5e1ca3c..08082fe 100644
35768--- a/drivers/pci/probe.c
35769+++ b/drivers/pci/probe.c
35770@@ -215,7 +215,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
35771 u16 orig_cmd;
35772 struct pci_bus_region region;
35773
35774- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
35775+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
35776
35777 if (!dev->mmio_always_on) {
35778 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
35779diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
35780index 27911b5..5b6db88 100644
35781--- a/drivers/pci/proc.c
35782+++ b/drivers/pci/proc.c
35783@@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
35784 static int __init pci_proc_init(void)
35785 {
35786 struct pci_dev *dev = NULL;
35787+
35788+#ifdef CONFIG_GRKERNSEC_PROC_ADD
35789+#ifdef CONFIG_GRKERNSEC_PROC_USER
35790+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35791+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35792+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35793+#endif
35794+#else
35795 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35796+#endif
35797 proc_create("devices", 0, proc_bus_pci_dir,
35798 &proc_bus_pci_dev_operations);
35799 proc_initialized = 1;
35800diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
35801index d68c000..f6094ca 100644
35802--- a/drivers/platform/x86/thinkpad_acpi.c
35803+++ b/drivers/platform/x86/thinkpad_acpi.c
35804@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
35805 return 0;
35806 }
35807
35808-void static hotkey_mask_warn_incomplete_mask(void)
35809+static void hotkey_mask_warn_incomplete_mask(void)
35810 {
35811 /* log only what the user can fix... */
35812 const u32 wantedmask = hotkey_driver_mask &
35813@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
35814 }
35815 }
35816
35817-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35818- struct tp_nvram_state *newn,
35819- const u32 event_mask)
35820-{
35821-
35822 #define TPACPI_COMPARE_KEY(__scancode, __member) \
35823 do { \
35824 if ((event_mask & (1 << __scancode)) && \
35825@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35826 tpacpi_hotkey_send_key(__scancode); \
35827 } while (0)
35828
35829- void issue_volchange(const unsigned int oldvol,
35830- const unsigned int newvol)
35831- {
35832- unsigned int i = oldvol;
35833+static void issue_volchange(const unsigned int oldvol,
35834+ const unsigned int newvol,
35835+ const u32 event_mask)
35836+{
35837+ unsigned int i = oldvol;
35838
35839- while (i > newvol) {
35840- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35841- i--;
35842- }
35843- while (i < newvol) {
35844- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35845- i++;
35846- }
35847+ while (i > newvol) {
35848+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35849+ i--;
35850 }
35851+ while (i < newvol) {
35852+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35853+ i++;
35854+ }
35855+}
35856
35857- void issue_brightnesschange(const unsigned int oldbrt,
35858- const unsigned int newbrt)
35859- {
35860- unsigned int i = oldbrt;
35861+static void issue_brightnesschange(const unsigned int oldbrt,
35862+ const unsigned int newbrt,
35863+ const u32 event_mask)
35864+{
35865+ unsigned int i = oldbrt;
35866
35867- while (i > newbrt) {
35868- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35869- i--;
35870- }
35871- while (i < newbrt) {
35872- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35873- i++;
35874- }
35875+ while (i > newbrt) {
35876+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35877+ i--;
35878+ }
35879+ while (i < newbrt) {
35880+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35881+ i++;
35882 }
35883+}
35884
35885+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35886+ struct tp_nvram_state *newn,
35887+ const u32 event_mask)
35888+{
35889 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
35890 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
35891 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
35892@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35893 oldn->volume_level != newn->volume_level) {
35894 /* recently muted, or repeated mute keypress, or
35895 * multiple presses ending in mute */
35896- issue_volchange(oldn->volume_level, newn->volume_level);
35897+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35898 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
35899 }
35900 } else {
35901@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35902 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35903 }
35904 if (oldn->volume_level != newn->volume_level) {
35905- issue_volchange(oldn->volume_level, newn->volume_level);
35906+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35907 } else if (oldn->volume_toggle != newn->volume_toggle) {
35908 /* repeated vol up/down keypress at end of scale ? */
35909 if (newn->volume_level == 0)
35910@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35911 /* handle brightness */
35912 if (oldn->brightness_level != newn->brightness_level) {
35913 issue_brightnesschange(oldn->brightness_level,
35914- newn->brightness_level);
35915+ newn->brightness_level,
35916+ event_mask);
35917 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
35918 /* repeated key presses that didn't change state */
35919 if (newn->brightness_level == 0)
35920@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35921 && !tp_features.bright_unkfw)
35922 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35923 }
35924+}
35925
35926 #undef TPACPI_COMPARE_KEY
35927 #undef TPACPI_MAY_SEND_KEY
35928-}
35929
35930 /*
35931 * Polling driver
35932diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
35933index 769d265..a3a05ca 100644
35934--- a/drivers/pnp/pnpbios/bioscalls.c
35935+++ b/drivers/pnp/pnpbios/bioscalls.c
35936@@ -58,7 +58,7 @@ do { \
35937 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
35938 } while(0)
35939
35940-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
35941+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
35942 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
35943
35944 /*
35945@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35946
35947 cpu = get_cpu();
35948 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
35949+
35950+ pax_open_kernel();
35951 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
35952+ pax_close_kernel();
35953
35954 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
35955 spin_lock_irqsave(&pnp_bios_lock, flags);
35956@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35957 :"memory");
35958 spin_unlock_irqrestore(&pnp_bios_lock, flags);
35959
35960+ pax_open_kernel();
35961 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
35962+ pax_close_kernel();
35963+
35964 put_cpu();
35965
35966 /* If we get here and this is set then the PnP BIOS faulted on us. */
35967@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
35968 return status;
35969 }
35970
35971-void pnpbios_calls_init(union pnp_bios_install_struct *header)
35972+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
35973 {
35974 int i;
35975
35976@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35977 pnp_bios_callpoint.offset = header->fields.pm16offset;
35978 pnp_bios_callpoint.segment = PNP_CS16;
35979
35980+ pax_open_kernel();
35981+
35982 for_each_possible_cpu(i) {
35983 struct desc_struct *gdt = get_cpu_gdt_table(i);
35984 if (!gdt)
35985@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35986 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
35987 (unsigned long)__va(header->fields.pm16dseg));
35988 }
35989+
35990+ pax_close_kernel();
35991 }
35992diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
35993index b0ecacb..7c9da2e 100644
35994--- a/drivers/pnp/resource.c
35995+++ b/drivers/pnp/resource.c
35996@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
35997 return 1;
35998
35999 /* check if the resource is valid */
36000- if (*irq < 0 || *irq > 15)
36001+ if (*irq > 15)
36002 return 0;
36003
36004 /* check if the resource is reserved */
36005@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
36006 return 1;
36007
36008 /* check if the resource is valid */
36009- if (*dma < 0 || *dma == 4 || *dma > 7)
36010+ if (*dma == 4 || *dma > 7)
36011 return 0;
36012
36013 /* check if the resource is reserved */
36014diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
36015index 222ccd8..6275fa5 100644
36016--- a/drivers/power/bq27x00_battery.c
36017+++ b/drivers/power/bq27x00_battery.c
36018@@ -72,7 +72,7 @@
36019 struct bq27x00_device_info;
36020 struct bq27x00_access_methods {
36021 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
36022-};
36023+} __no_const;
36024
36025 enum bq27x00_chip { BQ27000, BQ27500 };
36026
36027diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
36028index 4c5b053..104263e 100644
36029--- a/drivers/regulator/max8660.c
36030+++ b/drivers/regulator/max8660.c
36031@@ -385,8 +385,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
36032 max8660->shadow_regs[MAX8660_OVER1] = 5;
36033 } else {
36034 /* Otherwise devices can be toggled via software */
36035- max8660_dcdc_ops.enable = max8660_dcdc_enable;
36036- max8660_dcdc_ops.disable = max8660_dcdc_disable;
36037+ pax_open_kernel();
36038+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
36039+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
36040+ pax_close_kernel();
36041 }
36042
36043 /*
36044diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
36045index 845aa22..99ec402 100644
36046--- a/drivers/regulator/mc13892-regulator.c
36047+++ b/drivers/regulator/mc13892-regulator.c
36048@@ -574,10 +574,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
36049 }
36050 mc13xxx_unlock(mc13892);
36051
36052- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36053+ pax_open_kernel();
36054+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36055 = mc13892_vcam_set_mode;
36056- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36057+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36058 = mc13892_vcam_get_mode;
36059+ pax_close_kernel();
36060
36061 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
36062 ARRAY_SIZE(mc13892_regulators));
36063diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
36064index cace6d3..f623fda 100644
36065--- a/drivers/rtc/rtc-dev.c
36066+++ b/drivers/rtc/rtc-dev.c
36067@@ -14,6 +14,7 @@
36068 #include <linux/module.h>
36069 #include <linux/rtc.h>
36070 #include <linux/sched.h>
36071+#include <linux/grsecurity.h>
36072 #include "rtc-core.h"
36073
36074 static dev_t rtc_devt;
36075@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
36076 if (copy_from_user(&tm, uarg, sizeof(tm)))
36077 return -EFAULT;
36078
36079+ gr_log_timechange();
36080+
36081 return rtc_set_time(rtc, &tm);
36082
36083 case RTC_PIE_ON:
36084diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
36085index 3fcf627..f334910 100644
36086--- a/drivers/scsi/aacraid/aacraid.h
36087+++ b/drivers/scsi/aacraid/aacraid.h
36088@@ -492,7 +492,7 @@ struct adapter_ops
36089 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36090 /* Administrative operations */
36091 int (*adapter_comm)(struct aac_dev * dev, int comm);
36092-};
36093+} __no_const;
36094
36095 /*
36096 * Define which interrupt handler needs to be installed
36097diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
36098index 0d279c44..3d25a97 100644
36099--- a/drivers/scsi/aacraid/linit.c
36100+++ b/drivers/scsi/aacraid/linit.c
36101@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
36102 #elif defined(__devinitconst)
36103 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36104 #else
36105-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
36106+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36107 #endif
36108 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
36109 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
36110diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
36111index ff80552..1c4120c 100644
36112--- a/drivers/scsi/aic94xx/aic94xx_init.c
36113+++ b/drivers/scsi/aic94xx/aic94xx_init.c
36114@@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
36115 .lldd_ata_set_dmamode = asd_set_dmamode,
36116 };
36117
36118-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
36119+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
36120 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
36121 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
36122 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
36123diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
36124index 4ad7e36..d004679 100644
36125--- a/drivers/scsi/bfa/bfa.h
36126+++ b/drivers/scsi/bfa/bfa.h
36127@@ -196,7 +196,7 @@ struct bfa_hwif_s {
36128 u32 *end);
36129 int cpe_vec_q0;
36130 int rme_vec_q0;
36131-};
36132+} __no_const;
36133 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36134
36135 struct bfa_faa_cbfn_s {
36136diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
36137index f0f80e2..8ec946b 100644
36138--- a/drivers/scsi/bfa/bfa_fcpim.c
36139+++ b/drivers/scsi/bfa/bfa_fcpim.c
36140@@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
36141
36142 bfa_iotag_attach(fcp);
36143
36144- fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
36145+ fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
36146 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
36147 (fcp->num_itns * sizeof(struct bfa_itn_s));
36148 memset(fcp->itn_arr, 0,
36149@@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36150 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
36151 {
36152 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
36153- struct bfa_itn_s *itn;
36154+ bfa_itn_s_no_const *itn;
36155
36156 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
36157 itn->isr = isr;
36158diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
36159index 36f26da..38a34a8 100644
36160--- a/drivers/scsi/bfa/bfa_fcpim.h
36161+++ b/drivers/scsi/bfa/bfa_fcpim.h
36162@@ -37,6 +37,7 @@ struct bfa_iotag_s {
36163 struct bfa_itn_s {
36164 bfa_isr_func_t isr;
36165 };
36166+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
36167
36168 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36169 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
36170@@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
36171 struct list_head iotag_tio_free_q; /* free IO resources */
36172 struct list_head iotag_unused_q; /* unused IO resources*/
36173 struct bfa_iotag_s *iotag_arr;
36174- struct bfa_itn_s *itn_arr;
36175+ bfa_itn_s_no_const *itn_arr;
36176 int num_ioim_reqs;
36177 int num_fwtio_reqs;
36178 int num_itns;
36179diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
36180index 1a99d4b..e85d64b 100644
36181--- a/drivers/scsi/bfa/bfa_ioc.h
36182+++ b/drivers/scsi/bfa/bfa_ioc.h
36183@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
36184 bfa_ioc_disable_cbfn_t disable_cbfn;
36185 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36186 bfa_ioc_reset_cbfn_t reset_cbfn;
36187-};
36188+} __no_const;
36189
36190 /*
36191 * IOC event notification mechanism.
36192@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
36193 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
36194 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
36195 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
36196-};
36197+} __no_const;
36198
36199 /*
36200 * Queue element to wait for room in request queue. FIFO order is
36201diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
36202index a3a056a..b9bbc2f 100644
36203--- a/drivers/scsi/hosts.c
36204+++ b/drivers/scsi/hosts.c
36205@@ -42,7 +42,7 @@
36206 #include "scsi_logging.h"
36207
36208
36209-static atomic_t scsi_host_next_hn; /* host_no for next new host */
36210+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36211
36212
36213 static void scsi_host_cls_release(struct device *dev)
36214@@ -360,7 +360,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
36215 * subtract one because we increment first then return, but we need to
36216 * know what the next host number was before increment
36217 */
36218- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36219+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36220 shost->dma_channel = 0xff;
36221
36222 /* These three are default values which can be overridden */
36223diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
36224index 500e20d..ebd3059 100644
36225--- a/drivers/scsi/hpsa.c
36226+++ b/drivers/scsi/hpsa.c
36227@@ -521,7 +521,7 @@ static inline u32 next_command(struct ctlr_info *h)
36228 u32 a;
36229
36230 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36231- return h->access.command_completed(h);
36232+ return h->access->command_completed(h);
36233
36234 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
36235 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
36236@@ -3002,7 +3002,7 @@ static void start_io(struct ctlr_info *h)
36237 while (!list_empty(&h->reqQ)) {
36238 c = list_entry(h->reqQ.next, struct CommandList, list);
36239 /* can't do anything if fifo is full */
36240- if ((h->access.fifo_full(h))) {
36241+ if ((h->access->fifo_full(h))) {
36242 dev_warn(&h->pdev->dev, "fifo full\n");
36243 break;
36244 }
36245@@ -3012,7 +3012,7 @@ static void start_io(struct ctlr_info *h)
36246 h->Qdepth--;
36247
36248 /* Tell the controller execute command */
36249- h->access.submit_command(h, c);
36250+ h->access->submit_command(h, c);
36251
36252 /* Put job onto the completed Q */
36253 addQ(&h->cmpQ, c);
36254@@ -3021,17 +3021,17 @@ static void start_io(struct ctlr_info *h)
36255
36256 static inline unsigned long get_next_completion(struct ctlr_info *h)
36257 {
36258- return h->access.command_completed(h);
36259+ return h->access->command_completed(h);
36260 }
36261
36262 static inline bool interrupt_pending(struct ctlr_info *h)
36263 {
36264- return h->access.intr_pending(h);
36265+ return h->access->intr_pending(h);
36266 }
36267
36268 static inline long interrupt_not_for_us(struct ctlr_info *h)
36269 {
36270- return (h->access.intr_pending(h) == 0) ||
36271+ return (h->access->intr_pending(h) == 0) ||
36272 (h->interrupts_enabled == 0);
36273 }
36274
36275@@ -3930,7 +3930,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
36276 if (prod_index < 0)
36277 return -ENODEV;
36278 h->product_name = products[prod_index].product_name;
36279- h->access = *(products[prod_index].access);
36280+ h->access = products[prod_index].access;
36281
36282 if (hpsa_board_disabled(h->pdev)) {
36283 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
36284@@ -4175,7 +4175,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
36285
36286 assert_spin_locked(&lockup_detector_lock);
36287 remove_ctlr_from_lockup_detector_list(h);
36288- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36289+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36290 spin_lock_irqsave(&h->lock, flags);
36291 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
36292 spin_unlock_irqrestore(&h->lock, flags);
36293@@ -4355,7 +4355,7 @@ reinit_after_soft_reset:
36294 }
36295
36296 /* make sure the board interrupts are off */
36297- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36298+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36299
36300 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
36301 goto clean2;
36302@@ -4389,7 +4389,7 @@ reinit_after_soft_reset:
36303 * fake ones to scoop up any residual completions.
36304 */
36305 spin_lock_irqsave(&h->lock, flags);
36306- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36307+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36308 spin_unlock_irqrestore(&h->lock, flags);
36309 free_irq(h->intr[h->intr_mode], h);
36310 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
36311@@ -4408,9 +4408,9 @@ reinit_after_soft_reset:
36312 dev_info(&h->pdev->dev, "Board READY.\n");
36313 dev_info(&h->pdev->dev,
36314 "Waiting for stale completions to drain.\n");
36315- h->access.set_intr_mask(h, HPSA_INTR_ON);
36316+ h->access->set_intr_mask(h, HPSA_INTR_ON);
36317 msleep(10000);
36318- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36319+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36320
36321 rc = controller_reset_failed(h->cfgtable);
36322 if (rc)
36323@@ -4431,7 +4431,7 @@ reinit_after_soft_reset:
36324 }
36325
36326 /* Turn the interrupts on so we can service requests */
36327- h->access.set_intr_mask(h, HPSA_INTR_ON);
36328+ h->access->set_intr_mask(h, HPSA_INTR_ON);
36329
36330 hpsa_hba_inquiry(h);
36331 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
36332@@ -4483,7 +4483,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
36333 * To write all data in the battery backed cache to disks
36334 */
36335 hpsa_flush_cache(h);
36336- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36337+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36338 free_irq(h->intr[h->intr_mode], h);
36339 #ifdef CONFIG_PCI_MSI
36340 if (h->msix_vector)
36341@@ -4657,7 +4657,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
36342 return;
36343 }
36344 /* Change the access methods to the performant access methods */
36345- h->access = SA5_performant_access;
36346+ h->access = &SA5_performant_access;
36347 h->transMethod = CFGTBL_Trans_Performant;
36348 }
36349
36350diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
36351index 7b28d54..952f23a 100644
36352--- a/drivers/scsi/hpsa.h
36353+++ b/drivers/scsi/hpsa.h
36354@@ -72,7 +72,7 @@ struct ctlr_info {
36355 unsigned int msix_vector;
36356 unsigned int msi_vector;
36357 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
36358- struct access_method access;
36359+ struct access_method *access;
36360
36361 /* queue and queue Info */
36362 struct list_head reqQ;
36363diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
36364index f2df059..a3a9930 100644
36365--- a/drivers/scsi/ips.h
36366+++ b/drivers/scsi/ips.h
36367@@ -1027,7 +1027,7 @@ typedef struct {
36368 int (*intr)(struct ips_ha *);
36369 void (*enableint)(struct ips_ha *);
36370 uint32_t (*statupd)(struct ips_ha *);
36371-} ips_hw_func_t;
36372+} __no_const ips_hw_func_t;
36373
36374 typedef struct ips_ha {
36375 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36376diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
36377index aceffad..c35c08d 100644
36378--- a/drivers/scsi/libfc/fc_exch.c
36379+++ b/drivers/scsi/libfc/fc_exch.c
36380@@ -105,12 +105,12 @@ struct fc_exch_mgr {
36381 * all together if not used XXX
36382 */
36383 struct {
36384- atomic_t no_free_exch;
36385- atomic_t no_free_exch_xid;
36386- atomic_t xid_not_found;
36387- atomic_t xid_busy;
36388- atomic_t seq_not_found;
36389- atomic_t non_bls_resp;
36390+ atomic_unchecked_t no_free_exch;
36391+ atomic_unchecked_t no_free_exch_xid;
36392+ atomic_unchecked_t xid_not_found;
36393+ atomic_unchecked_t xid_busy;
36394+ atomic_unchecked_t seq_not_found;
36395+ atomic_unchecked_t non_bls_resp;
36396 } stats;
36397 };
36398
36399@@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
36400 /* allocate memory for exchange */
36401 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36402 if (!ep) {
36403- atomic_inc(&mp->stats.no_free_exch);
36404+ atomic_inc_unchecked(&mp->stats.no_free_exch);
36405 goto out;
36406 }
36407 memset(ep, 0, sizeof(*ep));
36408@@ -780,7 +780,7 @@ out:
36409 return ep;
36410 err:
36411 spin_unlock_bh(&pool->lock);
36412- atomic_inc(&mp->stats.no_free_exch_xid);
36413+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36414 mempool_free(ep, mp->ep_pool);
36415 return NULL;
36416 }
36417@@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36418 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36419 ep = fc_exch_find(mp, xid);
36420 if (!ep) {
36421- atomic_inc(&mp->stats.xid_not_found);
36422+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36423 reject = FC_RJT_OX_ID;
36424 goto out;
36425 }
36426@@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36427 ep = fc_exch_find(mp, xid);
36428 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36429 if (ep) {
36430- atomic_inc(&mp->stats.xid_busy);
36431+ atomic_inc_unchecked(&mp->stats.xid_busy);
36432 reject = FC_RJT_RX_ID;
36433 goto rel;
36434 }
36435@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36436 }
36437 xid = ep->xid; /* get our XID */
36438 } else if (!ep) {
36439- atomic_inc(&mp->stats.xid_not_found);
36440+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36441 reject = FC_RJT_RX_ID; /* XID not found */
36442 goto out;
36443 }
36444@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36445 } else {
36446 sp = &ep->seq;
36447 if (sp->id != fh->fh_seq_id) {
36448- atomic_inc(&mp->stats.seq_not_found);
36449+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36450 if (f_ctl & FC_FC_END_SEQ) {
36451 /*
36452 * Update sequence_id based on incoming last
36453@@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36454
36455 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36456 if (!ep) {
36457- atomic_inc(&mp->stats.xid_not_found);
36458+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36459 goto out;
36460 }
36461 if (ep->esb_stat & ESB_ST_COMPLETE) {
36462- atomic_inc(&mp->stats.xid_not_found);
36463+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36464 goto rel;
36465 }
36466 if (ep->rxid == FC_XID_UNKNOWN)
36467 ep->rxid = ntohs(fh->fh_rx_id);
36468 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36469- atomic_inc(&mp->stats.xid_not_found);
36470+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36471 goto rel;
36472 }
36473 if (ep->did != ntoh24(fh->fh_s_id) &&
36474 ep->did != FC_FID_FLOGI) {
36475- atomic_inc(&mp->stats.xid_not_found);
36476+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36477 goto rel;
36478 }
36479 sof = fr_sof(fp);
36480@@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36481 sp->ssb_stat |= SSB_ST_RESP;
36482 sp->id = fh->fh_seq_id;
36483 } else if (sp->id != fh->fh_seq_id) {
36484- atomic_inc(&mp->stats.seq_not_found);
36485+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36486 goto rel;
36487 }
36488
36489@@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36490 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36491
36492 if (!sp)
36493- atomic_inc(&mp->stats.xid_not_found);
36494+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36495 else
36496- atomic_inc(&mp->stats.non_bls_resp);
36497+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
36498
36499 fc_frame_free(fp);
36500 }
36501diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
36502index 441d88a..689ad71 100644
36503--- a/drivers/scsi/libsas/sas_ata.c
36504+++ b/drivers/scsi/libsas/sas_ata.c
36505@@ -529,7 +529,7 @@ static struct ata_port_operations sas_sata_ops = {
36506 .postreset = ata_std_postreset,
36507 .error_handler = ata_std_error_handler,
36508 .post_internal_cmd = sas_ata_post_internal,
36509- .qc_defer = ata_std_qc_defer,
36510+ .qc_defer = ata_std_qc_defer,
36511 .qc_prep = ata_noop_qc_prep,
36512 .qc_issue = sas_ata_qc_issue,
36513 .qc_fill_rtf = sas_ata_qc_fill_rtf,
36514diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
36515index 3a1ffdd..8eb7c71 100644
36516--- a/drivers/scsi/lpfc/lpfc.h
36517+++ b/drivers/scsi/lpfc/lpfc.h
36518@@ -413,7 +413,7 @@ struct lpfc_vport {
36519 struct dentry *debug_nodelist;
36520 struct dentry *vport_debugfs_root;
36521 struct lpfc_debugfs_trc *disc_trc;
36522- atomic_t disc_trc_cnt;
36523+ atomic_unchecked_t disc_trc_cnt;
36524 #endif
36525 uint8_t stat_data_enabled;
36526 uint8_t stat_data_blocked;
36527@@ -826,8 +826,8 @@ struct lpfc_hba {
36528 struct timer_list fabric_block_timer;
36529 unsigned long bit_flags;
36530 #define FABRIC_COMANDS_BLOCKED 0
36531- atomic_t num_rsrc_err;
36532- atomic_t num_cmd_success;
36533+ atomic_unchecked_t num_rsrc_err;
36534+ atomic_unchecked_t num_cmd_success;
36535 unsigned long last_rsrc_error_time;
36536 unsigned long last_ramp_down_time;
36537 unsigned long last_ramp_up_time;
36538@@ -863,7 +863,7 @@ struct lpfc_hba {
36539
36540 struct dentry *debug_slow_ring_trc;
36541 struct lpfc_debugfs_trc *slow_ring_trc;
36542- atomic_t slow_ring_trc_cnt;
36543+ atomic_unchecked_t slow_ring_trc_cnt;
36544 /* iDiag debugfs sub-directory */
36545 struct dentry *idiag_root;
36546 struct dentry *idiag_pci_cfg;
36547diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
36548index af04b0d..8f1a97e 100644
36549--- a/drivers/scsi/lpfc/lpfc_debugfs.c
36550+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
36551@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
36552
36553 #include <linux/debugfs.h>
36554
36555-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36556+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36557 static unsigned long lpfc_debugfs_start_time = 0L;
36558
36559 /* iDiag */
36560@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
36561 lpfc_debugfs_enable = 0;
36562
36563 len = 0;
36564- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36565+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36566 (lpfc_debugfs_max_disc_trc - 1);
36567 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36568 dtp = vport->disc_trc + i;
36569@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
36570 lpfc_debugfs_enable = 0;
36571
36572 len = 0;
36573- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36574+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36575 (lpfc_debugfs_max_slow_ring_trc - 1);
36576 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36577 dtp = phba->slow_ring_trc + i;
36578@@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
36579 !vport || !vport->disc_trc)
36580 return;
36581
36582- index = atomic_inc_return(&vport->disc_trc_cnt) &
36583+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36584 (lpfc_debugfs_max_disc_trc - 1);
36585 dtp = vport->disc_trc + index;
36586 dtp->fmt = fmt;
36587 dtp->data1 = data1;
36588 dtp->data2 = data2;
36589 dtp->data3 = data3;
36590- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36591+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36592 dtp->jif = jiffies;
36593 #endif
36594 return;
36595@@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
36596 !phba || !phba->slow_ring_trc)
36597 return;
36598
36599- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36600+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36601 (lpfc_debugfs_max_slow_ring_trc - 1);
36602 dtp = phba->slow_ring_trc + index;
36603 dtp->fmt = fmt;
36604 dtp->data1 = data1;
36605 dtp->data2 = data2;
36606 dtp->data3 = data3;
36607- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36608+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36609 dtp->jif = jiffies;
36610 #endif
36611 return;
36612@@ -4090,7 +4090,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36613 "slow_ring buffer\n");
36614 goto debug_failed;
36615 }
36616- atomic_set(&phba->slow_ring_trc_cnt, 0);
36617+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36618 memset(phba->slow_ring_trc, 0,
36619 (sizeof(struct lpfc_debugfs_trc) *
36620 lpfc_debugfs_max_slow_ring_trc));
36621@@ -4136,7 +4136,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36622 "buffer\n");
36623 goto debug_failed;
36624 }
36625- atomic_set(&vport->disc_trc_cnt, 0);
36626+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36627
36628 snprintf(name, sizeof(name), "discovery_trace");
36629 vport->debug_disc_trc =
36630diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
36631index 9598fdc..7e9f3d9 100644
36632--- a/drivers/scsi/lpfc/lpfc_init.c
36633+++ b/drivers/scsi/lpfc/lpfc_init.c
36634@@ -10266,8 +10266,10 @@ lpfc_init(void)
36635 "misc_register returned with status %d", error);
36636
36637 if (lpfc_enable_npiv) {
36638- lpfc_transport_functions.vport_create = lpfc_vport_create;
36639- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36640+ pax_open_kernel();
36641+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36642+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36643+ pax_close_kernel();
36644 }
36645 lpfc_transport_template =
36646 fc_attach_transport(&lpfc_transport_functions);
36647diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
36648index 88f3a83..686d3fa 100644
36649--- a/drivers/scsi/lpfc/lpfc_scsi.c
36650+++ b/drivers/scsi/lpfc/lpfc_scsi.c
36651@@ -311,7 +311,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
36652 uint32_t evt_posted;
36653
36654 spin_lock_irqsave(&phba->hbalock, flags);
36655- atomic_inc(&phba->num_rsrc_err);
36656+ atomic_inc_unchecked(&phba->num_rsrc_err);
36657 phba->last_rsrc_error_time = jiffies;
36658
36659 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36660@@ -352,7 +352,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
36661 unsigned long flags;
36662 struct lpfc_hba *phba = vport->phba;
36663 uint32_t evt_posted;
36664- atomic_inc(&phba->num_cmd_success);
36665+ atomic_inc_unchecked(&phba->num_cmd_success);
36666
36667 if (vport->cfg_lun_queue_depth <= queue_depth)
36668 return;
36669@@ -396,8 +396,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36670 unsigned long num_rsrc_err, num_cmd_success;
36671 int i;
36672
36673- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36674- num_cmd_success = atomic_read(&phba->num_cmd_success);
36675+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36676+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36677
36678 vports = lpfc_create_vport_work_array(phba);
36679 if (vports != NULL)
36680@@ -417,8 +417,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36681 }
36682 }
36683 lpfc_destroy_vport_work_array(phba, vports);
36684- atomic_set(&phba->num_rsrc_err, 0);
36685- atomic_set(&phba->num_cmd_success, 0);
36686+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36687+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36688 }
36689
36690 /**
36691@@ -452,8 +452,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
36692 }
36693 }
36694 lpfc_destroy_vport_work_array(phba, vports);
36695- atomic_set(&phba->num_rsrc_err, 0);
36696- atomic_set(&phba->num_cmd_success, 0);
36697+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36698+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36699 }
36700
36701 /**
36702diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
36703index ea8a0b4..812a124 100644
36704--- a/drivers/scsi/pmcraid.c
36705+++ b/drivers/scsi/pmcraid.c
36706@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
36707 res->scsi_dev = scsi_dev;
36708 scsi_dev->hostdata = res;
36709 res->change_detected = 0;
36710- atomic_set(&res->read_failures, 0);
36711- atomic_set(&res->write_failures, 0);
36712+ atomic_set_unchecked(&res->read_failures, 0);
36713+ atomic_set_unchecked(&res->write_failures, 0);
36714 rc = 0;
36715 }
36716 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36717@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
36718
36719 /* If this was a SCSI read/write command keep count of errors */
36720 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36721- atomic_inc(&res->read_failures);
36722+ atomic_inc_unchecked(&res->read_failures);
36723 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36724- atomic_inc(&res->write_failures);
36725+ atomic_inc_unchecked(&res->write_failures);
36726
36727 if (!RES_IS_GSCSI(res->cfg_entry) &&
36728 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36729@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
36730 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36731 * hrrq_id assigned here in queuecommand
36732 */
36733- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36734+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36735 pinstance->num_hrrq;
36736 cmd->cmd_done = pmcraid_io_done;
36737
36738@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
36739 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36740 * hrrq_id assigned here in queuecommand
36741 */
36742- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36743+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36744 pinstance->num_hrrq;
36745
36746 if (request_size) {
36747@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
36748
36749 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36750 /* add resources only after host is added into system */
36751- if (!atomic_read(&pinstance->expose_resources))
36752+ if (!atomic_read_unchecked(&pinstance->expose_resources))
36753 return;
36754
36755 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
36756@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
36757 init_waitqueue_head(&pinstance->reset_wait_q);
36758
36759 atomic_set(&pinstance->outstanding_cmds, 0);
36760- atomic_set(&pinstance->last_message_id, 0);
36761- atomic_set(&pinstance->expose_resources, 0);
36762+ atomic_set_unchecked(&pinstance->last_message_id, 0);
36763+ atomic_set_unchecked(&pinstance->expose_resources, 0);
36764
36765 INIT_LIST_HEAD(&pinstance->free_res_q);
36766 INIT_LIST_HEAD(&pinstance->used_res_q);
36767@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
36768 /* Schedule worker thread to handle CCN and take care of adding and
36769 * removing devices to OS
36770 */
36771- atomic_set(&pinstance->expose_resources, 1);
36772+ atomic_set_unchecked(&pinstance->expose_resources, 1);
36773 schedule_work(&pinstance->worker_q);
36774 return rc;
36775
36776diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
36777index e1d150f..6c6df44 100644
36778--- a/drivers/scsi/pmcraid.h
36779+++ b/drivers/scsi/pmcraid.h
36780@@ -748,7 +748,7 @@ struct pmcraid_instance {
36781 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
36782
36783 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
36784- atomic_t last_message_id;
36785+ atomic_unchecked_t last_message_id;
36786
36787 /* configuration table */
36788 struct pmcraid_config_table *cfg_table;
36789@@ -777,7 +777,7 @@ struct pmcraid_instance {
36790 atomic_t outstanding_cmds;
36791
36792 /* should add/delete resources to mid-layer now ?*/
36793- atomic_t expose_resources;
36794+ atomic_unchecked_t expose_resources;
36795
36796
36797
36798@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
36799 struct pmcraid_config_table_entry_ext cfg_entry_ext;
36800 };
36801 struct scsi_device *scsi_dev; /* Link scsi_device structure */
36802- atomic_t read_failures; /* count of failed READ commands */
36803- atomic_t write_failures; /* count of failed WRITE commands */
36804+ atomic_unchecked_t read_failures; /* count of failed READ commands */
36805+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
36806
36807 /* To indicate add/delete/modify during CCN */
36808 u8 change_detected;
36809diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
36810index a244303..6015eb7 100644
36811--- a/drivers/scsi/qla2xxx/qla_def.h
36812+++ b/drivers/scsi/qla2xxx/qla_def.h
36813@@ -2264,7 +2264,7 @@ struct isp_operations {
36814 int (*start_scsi) (srb_t *);
36815 int (*abort_isp) (struct scsi_qla_host *);
36816 int (*iospace_config)(struct qla_hw_data*);
36817-};
36818+} __no_const;
36819
36820 /* MSI-X Support *************************************************************/
36821
36822diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
36823index 7f2492e..5113877 100644
36824--- a/drivers/scsi/qla4xxx/ql4_def.h
36825+++ b/drivers/scsi/qla4xxx/ql4_def.h
36826@@ -268,7 +268,7 @@ struct ddb_entry {
36827 * (4000 only) */
36828 atomic_t relogin_timer; /* Max Time to wait for
36829 * relogin to complete */
36830- atomic_t relogin_retry_count; /* Num of times relogin has been
36831+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
36832 * retried */
36833 uint32_t default_time2wait; /* Default Min time between
36834 * relogins (+aens) */
36835diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
36836index ee47820..a83b1f4 100644
36837--- a/drivers/scsi/qla4xxx/ql4_os.c
36838+++ b/drivers/scsi/qla4xxx/ql4_os.c
36839@@ -2551,12 +2551,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
36840 */
36841 if (!iscsi_is_session_online(cls_sess)) {
36842 /* Reset retry relogin timer */
36843- atomic_inc(&ddb_entry->relogin_retry_count);
36844+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
36845 DEBUG2(ql4_printk(KERN_INFO, ha,
36846 "%s: index[%d] relogin timed out-retrying"
36847 " relogin (%d), retry (%d)\n", __func__,
36848 ddb_entry->fw_ddb_index,
36849- atomic_read(&ddb_entry->relogin_retry_count),
36850+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
36851 ddb_entry->default_time2wait + 4));
36852 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
36853 atomic_set(&ddb_entry->retry_relogin_timer,
36854@@ -4453,7 +4453,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
36855
36856 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
36857 atomic_set(&ddb_entry->relogin_timer, 0);
36858- atomic_set(&ddb_entry->relogin_retry_count, 0);
36859+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36860 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
36861 ddb_entry->default_relogin_timeout =
36862 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
36863diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
36864index 07322ec..91ccc23 100644
36865--- a/drivers/scsi/scsi.c
36866+++ b/drivers/scsi/scsi.c
36867@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
36868 unsigned long timeout;
36869 int rtn = 0;
36870
36871- atomic_inc(&cmd->device->iorequest_cnt);
36872+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36873
36874 /* check if the device is still usable */
36875 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
36876diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
36877index 4037fd5..a19fcc7 100644
36878--- a/drivers/scsi/scsi_lib.c
36879+++ b/drivers/scsi/scsi_lib.c
36880@@ -1415,7 +1415,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
36881 shost = sdev->host;
36882 scsi_init_cmd_errh(cmd);
36883 cmd->result = DID_NO_CONNECT << 16;
36884- atomic_inc(&cmd->device->iorequest_cnt);
36885+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36886
36887 /*
36888 * SCSI request completion path will do scsi_device_unbusy(),
36889@@ -1441,9 +1441,9 @@ static void scsi_softirq_done(struct request *rq)
36890
36891 INIT_LIST_HEAD(&cmd->eh_entry);
36892
36893- atomic_inc(&cmd->device->iodone_cnt);
36894+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
36895 if (cmd->result)
36896- atomic_inc(&cmd->device->ioerr_cnt);
36897+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
36898
36899 disposition = scsi_decide_disposition(cmd);
36900 if (disposition != SUCCESS &&
36901diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
36902index 04c2a27..9d8bd66 100644
36903--- a/drivers/scsi/scsi_sysfs.c
36904+++ b/drivers/scsi/scsi_sysfs.c
36905@@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
36906 char *buf) \
36907 { \
36908 struct scsi_device *sdev = to_scsi_device(dev); \
36909- unsigned long long count = atomic_read(&sdev->field); \
36910+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
36911 return snprintf(buf, 20, "0x%llx\n", count); \
36912 } \
36913 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
36914diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
36915index 84a1fdf..693b0d6 100644
36916--- a/drivers/scsi/scsi_tgt_lib.c
36917+++ b/drivers/scsi/scsi_tgt_lib.c
36918@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
36919 int err;
36920
36921 dprintk("%lx %u\n", uaddr, len);
36922- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
36923+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
36924 if (err) {
36925 /*
36926 * TODO: need to fixup sg_tablesize, max_segment_size,
36927diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
36928index 80fbe2a..efa223b 100644
36929--- a/drivers/scsi/scsi_transport_fc.c
36930+++ b/drivers/scsi/scsi_transport_fc.c
36931@@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
36932 * Netlink Infrastructure
36933 */
36934
36935-static atomic_t fc_event_seq;
36936+static atomic_unchecked_t fc_event_seq;
36937
36938 /**
36939 * fc_get_event_number - Obtain the next sequential FC event number
36940@@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
36941 u32
36942 fc_get_event_number(void)
36943 {
36944- return atomic_add_return(1, &fc_event_seq);
36945+ return atomic_add_return_unchecked(1, &fc_event_seq);
36946 }
36947 EXPORT_SYMBOL(fc_get_event_number);
36948
36949@@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
36950 {
36951 int error;
36952
36953- atomic_set(&fc_event_seq, 0);
36954+ atomic_set_unchecked(&fc_event_seq, 0);
36955
36956 error = transport_class_register(&fc_host_class);
36957 if (error)
36958@@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
36959 char *cp;
36960
36961 *val = simple_strtoul(buf, &cp, 0);
36962- if ((*cp && (*cp != '\n')) || (*val < 0))
36963+ if (*cp && (*cp != '\n'))
36964 return -EINVAL;
36965 /*
36966 * Check for overflow; dev_loss_tmo is u32
36967diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
36968index 1cf640e..78e9014 100644
36969--- a/drivers/scsi/scsi_transport_iscsi.c
36970+++ b/drivers/scsi/scsi_transport_iscsi.c
36971@@ -79,7 +79,7 @@ struct iscsi_internal {
36972 struct transport_container session_cont;
36973 };
36974
36975-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
36976+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
36977 static struct workqueue_struct *iscsi_eh_timer_workq;
36978
36979 static DEFINE_IDA(iscsi_sess_ida);
36980@@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
36981 int err;
36982
36983 ihost = shost->shost_data;
36984- session->sid = atomic_add_return(1, &iscsi_session_nr);
36985+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
36986
36987 if (target_id == ISCSI_MAX_TARGET) {
36988 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
36989@@ -2940,7 +2940,7 @@ static __init int iscsi_transport_init(void)
36990 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
36991 ISCSI_TRANSPORT_VERSION);
36992
36993- atomic_set(&iscsi_session_nr, 0);
36994+ atomic_set_unchecked(&iscsi_session_nr, 0);
36995
36996 err = class_register(&iscsi_transport_class);
36997 if (err)
36998diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
36999index 21a045e..ec89e03 100644
37000--- a/drivers/scsi/scsi_transport_srp.c
37001+++ b/drivers/scsi/scsi_transport_srp.c
37002@@ -33,7 +33,7 @@
37003 #include "scsi_transport_srp_internal.h"
37004
37005 struct srp_host_attrs {
37006- atomic_t next_port_id;
37007+ atomic_unchecked_t next_port_id;
37008 };
37009 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
37010
37011@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
37012 struct Scsi_Host *shost = dev_to_shost(dev);
37013 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
37014
37015- atomic_set(&srp_host->next_port_id, 0);
37016+ atomic_set_unchecked(&srp_host->next_port_id, 0);
37017 return 0;
37018 }
37019
37020@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
37021 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
37022 rport->roles = ids->roles;
37023
37024- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
37025+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
37026 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
37027
37028 transport_setup_device(&rport->dev);
37029diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
37030index eacd46b..e3f4d62 100644
37031--- a/drivers/scsi/sg.c
37032+++ b/drivers/scsi/sg.c
37033@@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
37034 sdp->disk->disk_name,
37035 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
37036 NULL,
37037- (char *)arg);
37038+ (char __user *)arg);
37039 case BLKTRACESTART:
37040 return blk_trace_startstop(sdp->device->request_queue, 1);
37041 case BLKTRACESTOP:
37042@@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
37043 const struct file_operations * fops;
37044 };
37045
37046-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
37047+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
37048 {"allow_dio", &adio_fops},
37049 {"debug", &debug_fops},
37050 {"def_reserved_size", &dressz_fops},
37051@@ -2332,7 +2332,7 @@ sg_proc_init(void)
37052 if (!sg_proc_sgp)
37053 return 1;
37054 for (k = 0; k < num_leaves; ++k) {
37055- struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
37056+ const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
37057 umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
37058 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
37059 }
37060diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
37061index 3d8f662..070f1a5 100644
37062--- a/drivers/spi/spi.c
37063+++ b/drivers/spi/spi.c
37064@@ -1361,7 +1361,7 @@ int spi_bus_unlock(struct spi_master *master)
37065 EXPORT_SYMBOL_GPL(spi_bus_unlock);
37066
37067 /* portable code must never pass more than 32 bytes */
37068-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37069+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
37070
37071 static u8 *buf;
37072
37073diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
37074index d91751f..a3a9e36 100644
37075--- a/drivers/staging/octeon/ethernet-rx.c
37076+++ b/drivers/staging/octeon/ethernet-rx.c
37077@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37078 /* Increment RX stats for virtual ports */
37079 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37080 #ifdef CONFIG_64BIT
37081- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37082- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37083+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37084+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37085 #else
37086- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37087- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37088+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37089+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37090 #endif
37091 }
37092 netif_receive_skb(skb);
37093@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37094 dev->name);
37095 */
37096 #ifdef CONFIG_64BIT
37097- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37098+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37099 #else
37100- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37101+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
37102 #endif
37103 dev_kfree_skb_irq(skb);
37104 }
37105diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
37106index 60cba81..71eb239 100644
37107--- a/drivers/staging/octeon/ethernet.c
37108+++ b/drivers/staging/octeon/ethernet.c
37109@@ -259,11 +259,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
37110 * since the RX tasklet also increments it.
37111 */
37112 #ifdef CONFIG_64BIT
37113- atomic64_add(rx_status.dropped_packets,
37114- (atomic64_t *)&priv->stats.rx_dropped);
37115+ atomic64_add_unchecked(rx_status.dropped_packets,
37116+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37117 #else
37118- atomic_add(rx_status.dropped_packets,
37119- (atomic_t *)&priv->stats.rx_dropped);
37120+ atomic_add_unchecked(rx_status.dropped_packets,
37121+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
37122 #endif
37123 }
37124
37125diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
37126index d3d8727..f9327bb8 100644
37127--- a/drivers/staging/rtl8712/rtl871x_io.h
37128+++ b/drivers/staging/rtl8712/rtl871x_io.h
37129@@ -108,7 +108,7 @@ struct _io_ops {
37130 u8 *pmem);
37131 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
37132 u8 *pmem);
37133-};
37134+} __no_const;
37135
37136 struct io_req {
37137 struct list_head list;
37138diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
37139index c7b5e8b..783d6cb 100644
37140--- a/drivers/staging/sbe-2t3e3/netdev.c
37141+++ b/drivers/staging/sbe-2t3e3/netdev.c
37142@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37143 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
37144
37145 if (rlen)
37146- if (copy_to_user(data, &resp, rlen))
37147+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
37148 return -EFAULT;
37149
37150 return 0;
37151diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
37152index 42cdafe..2769103 100644
37153--- a/drivers/staging/speakup/speakup_soft.c
37154+++ b/drivers/staging/speakup/speakup_soft.c
37155@@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
37156 break;
37157 } else if (!initialized) {
37158 if (*init) {
37159- ch = *init;
37160 init++;
37161 } else {
37162 initialized = 1;
37163 }
37164+ ch = *init;
37165 } else {
37166 ch = synth_buffer_getc();
37167 }
37168diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
37169index c7b888c..c94be93 100644
37170--- a/drivers/staging/usbip/usbip_common.h
37171+++ b/drivers/staging/usbip/usbip_common.h
37172@@ -289,7 +289,7 @@ struct usbip_device {
37173 void (*shutdown)(struct usbip_device *);
37174 void (*reset)(struct usbip_device *);
37175 void (*unusable)(struct usbip_device *);
37176- } eh_ops;
37177+ } __no_const eh_ops;
37178 };
37179
37180 /* usbip_common.c */
37181diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
37182index 88b3298..3783eee 100644
37183--- a/drivers/staging/usbip/vhci.h
37184+++ b/drivers/staging/usbip/vhci.h
37185@@ -88,7 +88,7 @@ struct vhci_hcd {
37186 unsigned resuming:1;
37187 unsigned long re_timeout;
37188
37189- atomic_t seqnum;
37190+ atomic_unchecked_t seqnum;
37191
37192 /*
37193 * NOTE:
37194diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
37195index dca9bf1..80735c9 100644
37196--- a/drivers/staging/usbip/vhci_hcd.c
37197+++ b/drivers/staging/usbip/vhci_hcd.c
37198@@ -488,7 +488,7 @@ static void vhci_tx_urb(struct urb *urb)
37199 return;
37200 }
37201
37202- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37203+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37204 if (priv->seqnum == 0xffff)
37205 dev_info(&urb->dev->dev, "seqnum max\n");
37206
37207@@ -740,7 +740,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
37208 return -ENOMEM;
37209 }
37210
37211- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37212+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37213 if (unlink->seqnum == 0xffff)
37214 pr_info("seqnum max\n");
37215
37216@@ -928,7 +928,7 @@ static int vhci_start(struct usb_hcd *hcd)
37217 vdev->rhport = rhport;
37218 }
37219
37220- atomic_set(&vhci->seqnum, 0);
37221+ atomic_set_unchecked(&vhci->seqnum, 0);
37222 spin_lock_init(&vhci->lock);
37223
37224 hcd->power_budget = 0; /* no limit */
37225diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
37226index f5fba732..210a16c 100644
37227--- a/drivers/staging/usbip/vhci_rx.c
37228+++ b/drivers/staging/usbip/vhci_rx.c
37229@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
37230 if (!urb) {
37231 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
37232 pr_info("max seqnum %d\n",
37233- atomic_read(&the_controller->seqnum));
37234+ atomic_read_unchecked(&the_controller->seqnum));
37235 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37236 return;
37237 }
37238diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
37239index 7735027..30eed13 100644
37240--- a/drivers/staging/vt6655/hostap.c
37241+++ b/drivers/staging/vt6655/hostap.c
37242@@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
37243 *
37244 */
37245
37246+static net_device_ops_no_const apdev_netdev_ops;
37247+
37248 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37249 {
37250 PSDevice apdev_priv;
37251 struct net_device *dev = pDevice->dev;
37252 int ret;
37253- const struct net_device_ops apdev_netdev_ops = {
37254- .ndo_start_xmit = pDevice->tx_80211,
37255- };
37256
37257 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37258
37259@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37260 *apdev_priv = *pDevice;
37261 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37262
37263+ /* only half broken now */
37264+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37265 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37266
37267 pDevice->apdev->type = ARPHRD_IEEE80211;
37268diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
37269index 51b5adf..098e320 100644
37270--- a/drivers/staging/vt6656/hostap.c
37271+++ b/drivers/staging/vt6656/hostap.c
37272@@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
37273 *
37274 */
37275
37276+static net_device_ops_no_const apdev_netdev_ops;
37277+
37278 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37279 {
37280 PSDevice apdev_priv;
37281 struct net_device *dev = pDevice->dev;
37282 int ret;
37283- const struct net_device_ops apdev_netdev_ops = {
37284- .ndo_start_xmit = pDevice->tx_80211,
37285- };
37286
37287 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37288
37289@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37290 *apdev_priv = *pDevice;
37291 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37292
37293+ /* only half broken now */
37294+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37295 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37296
37297 pDevice->apdev->type = ARPHRD_IEEE80211;
37298diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
37299index 7843dfd..3db105f 100644
37300--- a/drivers/staging/wlan-ng/hfa384x_usb.c
37301+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
37302@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
37303
37304 struct usbctlx_completor {
37305 int (*complete) (struct usbctlx_completor *);
37306-};
37307+} __no_const;
37308
37309 static int
37310 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
37311diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
37312index 1ca66ea..76f1343 100644
37313--- a/drivers/staging/zcache/tmem.c
37314+++ b/drivers/staging/zcache/tmem.c
37315@@ -39,7 +39,7 @@
37316 * A tmem host implementation must use this function to register callbacks
37317 * for memory allocation.
37318 */
37319-static struct tmem_hostops tmem_hostops;
37320+static tmem_hostops_no_const tmem_hostops;
37321
37322 static void tmem_objnode_tree_init(void);
37323
37324@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
37325 * A tmem host implementation must use this function to register
37326 * callbacks for a page-accessible memory (PAM) implementation
37327 */
37328-static struct tmem_pamops tmem_pamops;
37329+static tmem_pamops_no_const tmem_pamops;
37330
37331 void tmem_register_pamops(struct tmem_pamops *m)
37332 {
37333diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
37334index 0d4aa82..f7832d4 100644
37335--- a/drivers/staging/zcache/tmem.h
37336+++ b/drivers/staging/zcache/tmem.h
37337@@ -180,6 +180,7 @@ struct tmem_pamops {
37338 void (*new_obj)(struct tmem_obj *);
37339 int (*replace_in_obj)(void *, struct tmem_obj *);
37340 };
37341+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
37342 extern void tmem_register_pamops(struct tmem_pamops *m);
37343
37344 /* memory allocation methods provided by the host implementation */
37345@@ -189,6 +190,7 @@ struct tmem_hostops {
37346 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
37347 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
37348 };
37349+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
37350 extern void tmem_register_hostops(struct tmem_hostops *m);
37351
37352 /* core tmem accessor functions */
37353diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
37354index f015839..b15dfc4 100644
37355--- a/drivers/target/target_core_tmr.c
37356+++ b/drivers/target/target_core_tmr.c
37357@@ -327,7 +327,7 @@ static void core_tmr_drain_task_list(
37358 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
37359 cmd->t_task_list_num,
37360 atomic_read(&cmd->t_task_cdbs_left),
37361- atomic_read(&cmd->t_task_cdbs_sent),
37362+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37363 (cmd->transport_state & CMD_T_ACTIVE) != 0,
37364 (cmd->transport_state & CMD_T_STOP) != 0,
37365 (cmd->transport_state & CMD_T_SENT) != 0);
37366diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
37367index 443704f..92d3517 100644
37368--- a/drivers/target/target_core_transport.c
37369+++ b/drivers/target/target_core_transport.c
37370@@ -1355,7 +1355,7 @@ struct se_device *transport_add_device_to_core_hba(
37371 spin_lock_init(&dev->se_port_lock);
37372 spin_lock_init(&dev->se_tmr_lock);
37373 spin_lock_init(&dev->qf_cmd_lock);
37374- atomic_set(&dev->dev_ordered_id, 0);
37375+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
37376
37377 se_dev_set_default_attribs(dev, dev_limits);
37378
37379@@ -1542,7 +1542,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
37380 * Used to determine when ORDERED commands should go from
37381 * Dormant to Active status.
37382 */
37383- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
37384+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
37385 smp_mb__after_atomic_inc();
37386 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
37387 cmd->se_ordered_id, cmd->sam_task_attr,
37388@@ -1956,7 +1956,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
37389 " CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
37390 cmd->t_task_list_num,
37391 atomic_read(&cmd->t_task_cdbs_left),
37392- atomic_read(&cmd->t_task_cdbs_sent),
37393+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37394 atomic_read(&cmd->t_task_cdbs_ex_left),
37395 (cmd->transport_state & CMD_T_ACTIVE) != 0,
37396 (cmd->transport_state & CMD_T_STOP) != 0,
37397@@ -2216,9 +2216,9 @@ check_depth:
37398 cmd = task->task_se_cmd;
37399 spin_lock_irqsave(&cmd->t_state_lock, flags);
37400 task->task_flags |= (TF_ACTIVE | TF_SENT);
37401- atomic_inc(&cmd->t_task_cdbs_sent);
37402+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
37403
37404- if (atomic_read(&cmd->t_task_cdbs_sent) ==
37405+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
37406 cmd->t_task_list_num)
37407 cmd->transport_state |= CMD_T_SENT;
37408
37409diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
37410index 3436436..772237b 100644
37411--- a/drivers/tty/hvc/hvcs.c
37412+++ b/drivers/tty/hvc/hvcs.c
37413@@ -83,6 +83,7 @@
37414 #include <asm/hvcserver.h>
37415 #include <asm/uaccess.h>
37416 #include <asm/vio.h>
37417+#include <asm/local.h>
37418
37419 /*
37420 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
37421@@ -270,7 +271,7 @@ struct hvcs_struct {
37422 unsigned int index;
37423
37424 struct tty_struct *tty;
37425- int open_count;
37426+ local_t open_count;
37427
37428 /*
37429 * Used to tell the driver kernel_thread what operations need to take
37430@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
37431
37432 spin_lock_irqsave(&hvcsd->lock, flags);
37433
37434- if (hvcsd->open_count > 0) {
37435+ if (local_read(&hvcsd->open_count) > 0) {
37436 spin_unlock_irqrestore(&hvcsd->lock, flags);
37437 printk(KERN_INFO "HVCS: vterm state unchanged. "
37438 "The hvcs device node is still in use.\n");
37439@@ -1138,7 +1139,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
37440 if ((retval = hvcs_partner_connect(hvcsd)))
37441 goto error_release;
37442
37443- hvcsd->open_count = 1;
37444+ local_set(&hvcsd->open_count, 1);
37445 hvcsd->tty = tty;
37446 tty->driver_data = hvcsd;
37447
37448@@ -1172,7 +1173,7 @@ fast_open:
37449
37450 spin_lock_irqsave(&hvcsd->lock, flags);
37451 kref_get(&hvcsd->kref);
37452- hvcsd->open_count++;
37453+ local_inc(&hvcsd->open_count);
37454 hvcsd->todo_mask |= HVCS_SCHED_READ;
37455 spin_unlock_irqrestore(&hvcsd->lock, flags);
37456
37457@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37458 hvcsd = tty->driver_data;
37459
37460 spin_lock_irqsave(&hvcsd->lock, flags);
37461- if (--hvcsd->open_count == 0) {
37462+ if (local_dec_and_test(&hvcsd->open_count)) {
37463
37464 vio_disable_interrupts(hvcsd->vdev);
37465
37466@@ -1242,10 +1243,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37467 free_irq(irq, hvcsd);
37468 kref_put(&hvcsd->kref, destroy_hvcs_struct);
37469 return;
37470- } else if (hvcsd->open_count < 0) {
37471+ } else if (local_read(&hvcsd->open_count) < 0) {
37472 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
37473 " is missmanaged.\n",
37474- hvcsd->vdev->unit_address, hvcsd->open_count);
37475+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
37476 }
37477
37478 spin_unlock_irqrestore(&hvcsd->lock, flags);
37479@@ -1261,7 +1262,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37480
37481 spin_lock_irqsave(&hvcsd->lock, flags);
37482 /* Preserve this so that we know how many kref refs to put */
37483- temp_open_count = hvcsd->open_count;
37484+ temp_open_count = local_read(&hvcsd->open_count);
37485
37486 /*
37487 * Don't kref put inside the spinlock because the destruction
37488@@ -1276,7 +1277,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37489 hvcsd->tty->driver_data = NULL;
37490 hvcsd->tty = NULL;
37491
37492- hvcsd->open_count = 0;
37493+ local_set(&hvcsd->open_count, 0);
37494
37495 /* This will drop any buffered data on the floor which is OK in a hangup
37496 * scenario. */
37497@@ -1347,7 +1348,7 @@ static int hvcs_write(struct tty_struct *tty,
37498 * the middle of a write operation? This is a crummy place to do this
37499 * but we want to keep it all in the spinlock.
37500 */
37501- if (hvcsd->open_count <= 0) {
37502+ if (local_read(&hvcsd->open_count) <= 0) {
37503 spin_unlock_irqrestore(&hvcsd->lock, flags);
37504 return -ENODEV;
37505 }
37506@@ -1421,7 +1422,7 @@ static int hvcs_write_room(struct tty_struct *tty)
37507 {
37508 struct hvcs_struct *hvcsd = tty->driver_data;
37509
37510- if (!hvcsd || hvcsd->open_count <= 0)
37511+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
37512 return 0;
37513
37514 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
37515diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
37516index 4daf962..b4a2281 100644
37517--- a/drivers/tty/ipwireless/tty.c
37518+++ b/drivers/tty/ipwireless/tty.c
37519@@ -29,6 +29,7 @@
37520 #include <linux/tty_driver.h>
37521 #include <linux/tty_flip.h>
37522 #include <linux/uaccess.h>
37523+#include <asm/local.h>
37524
37525 #include "tty.h"
37526 #include "network.h"
37527@@ -51,7 +52,7 @@ struct ipw_tty {
37528 int tty_type;
37529 struct ipw_network *network;
37530 struct tty_struct *linux_tty;
37531- int open_count;
37532+ local_t open_count;
37533 unsigned int control_lines;
37534 struct mutex ipw_tty_mutex;
37535 int tx_bytes_queued;
37536@@ -117,10 +118,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37537 mutex_unlock(&tty->ipw_tty_mutex);
37538 return -ENODEV;
37539 }
37540- if (tty->open_count == 0)
37541+ if (local_read(&tty->open_count) == 0)
37542 tty->tx_bytes_queued = 0;
37543
37544- tty->open_count++;
37545+ local_inc(&tty->open_count);
37546
37547 tty->linux_tty = linux_tty;
37548 linux_tty->driver_data = tty;
37549@@ -136,9 +137,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37550
37551 static void do_ipw_close(struct ipw_tty *tty)
37552 {
37553- tty->open_count--;
37554-
37555- if (tty->open_count == 0) {
37556+ if (local_dec_return(&tty->open_count) == 0) {
37557 struct tty_struct *linux_tty = tty->linux_tty;
37558
37559 if (linux_tty != NULL) {
37560@@ -159,7 +158,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
37561 return;
37562
37563 mutex_lock(&tty->ipw_tty_mutex);
37564- if (tty->open_count == 0) {
37565+ if (local_read(&tty->open_count) == 0) {
37566 mutex_unlock(&tty->ipw_tty_mutex);
37567 return;
37568 }
37569@@ -188,7 +187,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
37570 return;
37571 }
37572
37573- if (!tty->open_count) {
37574+ if (!local_read(&tty->open_count)) {
37575 mutex_unlock(&tty->ipw_tty_mutex);
37576 return;
37577 }
37578@@ -230,7 +229,7 @@ static int ipw_write(struct tty_struct *linux_tty,
37579 return -ENODEV;
37580
37581 mutex_lock(&tty->ipw_tty_mutex);
37582- if (!tty->open_count) {
37583+ if (!local_read(&tty->open_count)) {
37584 mutex_unlock(&tty->ipw_tty_mutex);
37585 return -EINVAL;
37586 }
37587@@ -270,7 +269,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
37588 if (!tty)
37589 return -ENODEV;
37590
37591- if (!tty->open_count)
37592+ if (!local_read(&tty->open_count))
37593 return -EINVAL;
37594
37595 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
37596@@ -312,7 +311,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
37597 if (!tty)
37598 return 0;
37599
37600- if (!tty->open_count)
37601+ if (!local_read(&tty->open_count))
37602 return 0;
37603
37604 return tty->tx_bytes_queued;
37605@@ -393,7 +392,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
37606 if (!tty)
37607 return -ENODEV;
37608
37609- if (!tty->open_count)
37610+ if (!local_read(&tty->open_count))
37611 return -EINVAL;
37612
37613 return get_control_lines(tty);
37614@@ -409,7 +408,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
37615 if (!tty)
37616 return -ENODEV;
37617
37618- if (!tty->open_count)
37619+ if (!local_read(&tty->open_count))
37620 return -EINVAL;
37621
37622 return set_control_lines(tty, set, clear);
37623@@ -423,7 +422,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
37624 if (!tty)
37625 return -ENODEV;
37626
37627- if (!tty->open_count)
37628+ if (!local_read(&tty->open_count))
37629 return -EINVAL;
37630
37631 /* FIXME: Exactly how is the tty object locked here .. */
37632@@ -572,7 +571,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
37633 against a parallel ioctl etc */
37634 mutex_lock(&ttyj->ipw_tty_mutex);
37635 }
37636- while (ttyj->open_count)
37637+ while (local_read(&ttyj->open_count))
37638 do_ipw_close(ttyj);
37639 ipwireless_disassociate_network_ttys(network,
37640 ttyj->channel_idx);
37641diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
37642index c43b683..0a88f1c 100644
37643--- a/drivers/tty/n_gsm.c
37644+++ b/drivers/tty/n_gsm.c
37645@@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
37646 kref_init(&dlci->ref);
37647 mutex_init(&dlci->mutex);
37648 dlci->fifo = &dlci->_fifo;
37649- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
37650+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
37651 kfree(dlci);
37652 return NULL;
37653 }
37654diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
37655index 94b6eda..15f7cec 100644
37656--- a/drivers/tty/n_tty.c
37657+++ b/drivers/tty/n_tty.c
37658@@ -2122,6 +2122,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
37659 {
37660 *ops = tty_ldisc_N_TTY;
37661 ops->owner = NULL;
37662- ops->refcount = ops->flags = 0;
37663+ atomic_set(&ops->refcount, 0);
37664+ ops->flags = 0;
37665 }
37666 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
37667diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
37668index eeae7fa..177a743 100644
37669--- a/drivers/tty/pty.c
37670+++ b/drivers/tty/pty.c
37671@@ -707,8 +707,10 @@ static void __init unix98_pty_init(void)
37672 panic("Couldn't register Unix98 pts driver");
37673
37674 /* Now create the /dev/ptmx special device */
37675+ pax_open_kernel();
37676 tty_default_fops(&ptmx_fops);
37677- ptmx_fops.open = ptmx_open;
37678+ *(void **)&ptmx_fops.open = ptmx_open;
37679+ pax_close_kernel();
37680
37681 cdev_init(&ptmx_cdev, &ptmx_fops);
37682 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
37683diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
37684index 2b42a01..32a2ed3 100644
37685--- a/drivers/tty/serial/kgdboc.c
37686+++ b/drivers/tty/serial/kgdboc.c
37687@@ -24,8 +24,9 @@
37688 #define MAX_CONFIG_LEN 40
37689
37690 static struct kgdb_io kgdboc_io_ops;
37691+static struct kgdb_io kgdboc_io_ops_console;
37692
37693-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37694+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
37695 static int configured = -1;
37696
37697 static char config[MAX_CONFIG_LEN];
37698@@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
37699 kgdboc_unregister_kbd();
37700 if (configured == 1)
37701 kgdb_unregister_io_module(&kgdboc_io_ops);
37702+ else if (configured == 2)
37703+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
37704 }
37705
37706 static int configure_kgdboc(void)
37707@@ -157,13 +160,13 @@ static int configure_kgdboc(void)
37708 int err;
37709 char *cptr = config;
37710 struct console *cons;
37711+ int is_console = 0;
37712
37713 err = kgdboc_option_setup(config);
37714 if (err || !strlen(config) || isspace(config[0]))
37715 goto noconfig;
37716
37717 err = -ENODEV;
37718- kgdboc_io_ops.is_console = 0;
37719 kgdb_tty_driver = NULL;
37720
37721 kgdboc_use_kms = 0;
37722@@ -184,7 +187,7 @@ static int configure_kgdboc(void)
37723 int idx;
37724 if (cons->device && cons->device(cons, &idx) == p &&
37725 idx == tty_line) {
37726- kgdboc_io_ops.is_console = 1;
37727+ is_console = 1;
37728 break;
37729 }
37730 cons = cons->next;
37731@@ -194,12 +197,16 @@ static int configure_kgdboc(void)
37732 kgdb_tty_line = tty_line;
37733
37734 do_register:
37735- err = kgdb_register_io_module(&kgdboc_io_ops);
37736+ if (is_console) {
37737+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
37738+ configured = 2;
37739+ } else {
37740+ err = kgdb_register_io_module(&kgdboc_io_ops);
37741+ configured = 1;
37742+ }
37743 if (err)
37744 goto noconfig;
37745
37746- configured = 1;
37747-
37748 return 0;
37749
37750 noconfig:
37751@@ -213,7 +220,7 @@ noconfig:
37752 static int __init init_kgdboc(void)
37753 {
37754 /* Already configured? */
37755- if (configured == 1)
37756+ if (configured >= 1)
37757 return 0;
37758
37759 return configure_kgdboc();
37760@@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
37761 if (config[len - 1] == '\n')
37762 config[len - 1] = '\0';
37763
37764- if (configured == 1)
37765+ if (configured >= 1)
37766 cleanup_kgdboc();
37767
37768 /* Go and configure with the new params. */
37769@@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
37770 .post_exception = kgdboc_post_exp_handler,
37771 };
37772
37773+static struct kgdb_io kgdboc_io_ops_console = {
37774+ .name = "kgdboc",
37775+ .read_char = kgdboc_get_char,
37776+ .write_char = kgdboc_put_char,
37777+ .pre_exception = kgdboc_pre_exp_handler,
37778+ .post_exception = kgdboc_post_exp_handler,
37779+ .is_console = 1
37780+};
37781+
37782 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
37783 /* This is only available if kgdboc is a built in for early debugging */
37784 static int __init kgdboc_early_init(char *opt)
37785diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
37786index 05728894..b9d44c6 100644
37787--- a/drivers/tty/sysrq.c
37788+++ b/drivers/tty/sysrq.c
37789@@ -865,7 +865,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
37790 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
37791 size_t count, loff_t *ppos)
37792 {
37793- if (count) {
37794+ if (count && capable(CAP_SYS_ADMIN)) {
37795 char c;
37796
37797 if (get_user(c, buf))
37798diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
37799index d939bd7..33d92cd 100644
37800--- a/drivers/tty/tty_io.c
37801+++ b/drivers/tty/tty_io.c
37802@@ -3278,7 +3278,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
37803
37804 void tty_default_fops(struct file_operations *fops)
37805 {
37806- *fops = tty_fops;
37807+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
37808 }
37809
37810 /*
37811diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
37812index 24b95db..9c078d0 100644
37813--- a/drivers/tty/tty_ldisc.c
37814+++ b/drivers/tty/tty_ldisc.c
37815@@ -57,7 +57,7 @@ static void put_ldisc(struct tty_ldisc *ld)
37816 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
37817 struct tty_ldisc_ops *ldo = ld->ops;
37818
37819- ldo->refcount--;
37820+ atomic_dec(&ldo->refcount);
37821 module_put(ldo->owner);
37822 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37823
37824@@ -92,7 +92,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
37825 spin_lock_irqsave(&tty_ldisc_lock, flags);
37826 tty_ldiscs[disc] = new_ldisc;
37827 new_ldisc->num = disc;
37828- new_ldisc->refcount = 0;
37829+ atomic_set(&new_ldisc->refcount, 0);
37830 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37831
37832 return ret;
37833@@ -120,7 +120,7 @@ int tty_unregister_ldisc(int disc)
37834 return -EINVAL;
37835
37836 spin_lock_irqsave(&tty_ldisc_lock, flags);
37837- if (tty_ldiscs[disc]->refcount)
37838+ if (atomic_read(&tty_ldiscs[disc]->refcount))
37839 ret = -EBUSY;
37840 else
37841 tty_ldiscs[disc] = NULL;
37842@@ -141,7 +141,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
37843 if (ldops) {
37844 ret = ERR_PTR(-EAGAIN);
37845 if (try_module_get(ldops->owner)) {
37846- ldops->refcount++;
37847+ atomic_inc(&ldops->refcount);
37848 ret = ldops;
37849 }
37850 }
37851@@ -154,7 +154,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
37852 unsigned long flags;
37853
37854 spin_lock_irqsave(&tty_ldisc_lock, flags);
37855- ldops->refcount--;
37856+ atomic_dec(&ldops->refcount);
37857 module_put(ldops->owner);
37858 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37859 }
37860diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
37861index 3b0c4e3..f98a992 100644
37862--- a/drivers/tty/vt/keyboard.c
37863+++ b/drivers/tty/vt/keyboard.c
37864@@ -663,6 +663,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
37865 kbd->kbdmode == VC_OFF) &&
37866 value != KVAL(K_SAK))
37867 return; /* SAK is allowed even in raw mode */
37868+
37869+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
37870+ {
37871+ void *func = fn_handler[value];
37872+ if (func == fn_show_state || func == fn_show_ptregs ||
37873+ func == fn_show_mem)
37874+ return;
37875+ }
37876+#endif
37877+
37878 fn_handler[value](vc);
37879 }
37880
37881@@ -1812,9 +1822,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
37882 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
37883 return -EFAULT;
37884
37885- if (!capable(CAP_SYS_TTY_CONFIG))
37886- perm = 0;
37887-
37888 switch (cmd) {
37889 case KDGKBENT:
37890 /* Ensure another thread doesn't free it under us */
37891@@ -1829,6 +1836,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
37892 spin_unlock_irqrestore(&kbd_event_lock, flags);
37893 return put_user(val, &user_kbe->kb_value);
37894 case KDSKBENT:
37895+ if (!capable(CAP_SYS_TTY_CONFIG))
37896+ perm = 0;
37897+
37898 if (!perm)
37899 return -EPERM;
37900 if (!i && v == K_NOSUCHMAP) {
37901@@ -1919,9 +1929,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
37902 int i, j, k;
37903 int ret;
37904
37905- if (!capable(CAP_SYS_TTY_CONFIG))
37906- perm = 0;
37907-
37908 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
37909 if (!kbs) {
37910 ret = -ENOMEM;
37911@@ -1955,6 +1962,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
37912 kfree(kbs);
37913 return ((p && *p) ? -EOVERFLOW : 0);
37914 case KDSKBSENT:
37915+ if (!capable(CAP_SYS_TTY_CONFIG))
37916+ perm = 0;
37917+
37918 if (!perm) {
37919 ret = -EPERM;
37920 goto reterr;
37921diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
37922index a783d53..cb30d94 100644
37923--- a/drivers/uio/uio.c
37924+++ b/drivers/uio/uio.c
37925@@ -25,6 +25,7 @@
37926 #include <linux/kobject.h>
37927 #include <linux/cdev.h>
37928 #include <linux/uio_driver.h>
37929+#include <asm/local.h>
37930
37931 #define UIO_MAX_DEVICES (1U << MINORBITS)
37932
37933@@ -32,10 +33,10 @@ struct uio_device {
37934 struct module *owner;
37935 struct device *dev;
37936 int minor;
37937- atomic_t event;
37938+ atomic_unchecked_t event;
37939 struct fasync_struct *async_queue;
37940 wait_queue_head_t wait;
37941- int vma_count;
37942+ local_t vma_count;
37943 struct uio_info *info;
37944 struct kobject *map_dir;
37945 struct kobject *portio_dir;
37946@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
37947 struct device_attribute *attr, char *buf)
37948 {
37949 struct uio_device *idev = dev_get_drvdata(dev);
37950- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
37951+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
37952 }
37953
37954 static struct device_attribute uio_class_attributes[] = {
37955@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
37956 {
37957 struct uio_device *idev = info->uio_dev;
37958
37959- atomic_inc(&idev->event);
37960+ atomic_inc_unchecked(&idev->event);
37961 wake_up_interruptible(&idev->wait);
37962 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
37963 }
37964@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
37965 }
37966
37967 listener->dev = idev;
37968- listener->event_count = atomic_read(&idev->event);
37969+ listener->event_count = atomic_read_unchecked(&idev->event);
37970 filep->private_data = listener;
37971
37972 if (idev->info->open) {
37973@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
37974 return -EIO;
37975
37976 poll_wait(filep, &idev->wait, wait);
37977- if (listener->event_count != atomic_read(&idev->event))
37978+ if (listener->event_count != atomic_read_unchecked(&idev->event))
37979 return POLLIN | POLLRDNORM;
37980 return 0;
37981 }
37982@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
37983 do {
37984 set_current_state(TASK_INTERRUPTIBLE);
37985
37986- event_count = atomic_read(&idev->event);
37987+ event_count = atomic_read_unchecked(&idev->event);
37988 if (event_count != listener->event_count) {
37989 if (copy_to_user(buf, &event_count, count))
37990 retval = -EFAULT;
37991@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
37992 static void uio_vma_open(struct vm_area_struct *vma)
37993 {
37994 struct uio_device *idev = vma->vm_private_data;
37995- idev->vma_count++;
37996+ local_inc(&idev->vma_count);
37997 }
37998
37999 static void uio_vma_close(struct vm_area_struct *vma)
38000 {
38001 struct uio_device *idev = vma->vm_private_data;
38002- idev->vma_count--;
38003+ local_dec(&idev->vma_count);
38004 }
38005
38006 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38007@@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
38008 idev->owner = owner;
38009 idev->info = info;
38010 init_waitqueue_head(&idev->wait);
38011- atomic_set(&idev->event, 0);
38012+ atomic_set_unchecked(&idev->event, 0);
38013
38014 ret = uio_get_minor(idev);
38015 if (ret)
38016diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
38017index 98b89fe..aff824e 100644
38018--- a/drivers/usb/atm/cxacru.c
38019+++ b/drivers/usb/atm/cxacru.c
38020@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
38021 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
38022 if (ret < 2)
38023 return -EINVAL;
38024- if (index < 0 || index > 0x7f)
38025+ if (index > 0x7f)
38026 return -EINVAL;
38027 pos += tmp;
38028
38029diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
38030index d3448ca..d2864ca 100644
38031--- a/drivers/usb/atm/usbatm.c
38032+++ b/drivers/usb/atm/usbatm.c
38033@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38034 if (printk_ratelimit())
38035 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38036 __func__, vpi, vci);
38037- atomic_inc(&vcc->stats->rx_err);
38038+ atomic_inc_unchecked(&vcc->stats->rx_err);
38039 return;
38040 }
38041
38042@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38043 if (length > ATM_MAX_AAL5_PDU) {
38044 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38045 __func__, length, vcc);
38046- atomic_inc(&vcc->stats->rx_err);
38047+ atomic_inc_unchecked(&vcc->stats->rx_err);
38048 goto out;
38049 }
38050
38051@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38052 if (sarb->len < pdu_length) {
38053 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38054 __func__, pdu_length, sarb->len, vcc);
38055- atomic_inc(&vcc->stats->rx_err);
38056+ atomic_inc_unchecked(&vcc->stats->rx_err);
38057 goto out;
38058 }
38059
38060 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38061 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38062 __func__, vcc);
38063- atomic_inc(&vcc->stats->rx_err);
38064+ atomic_inc_unchecked(&vcc->stats->rx_err);
38065 goto out;
38066 }
38067
38068@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38069 if (printk_ratelimit())
38070 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38071 __func__, length);
38072- atomic_inc(&vcc->stats->rx_drop);
38073+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38074 goto out;
38075 }
38076
38077@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38078
38079 vcc->push(vcc, skb);
38080
38081- atomic_inc(&vcc->stats->rx);
38082+ atomic_inc_unchecked(&vcc->stats->rx);
38083 out:
38084 skb_trim(sarb, 0);
38085 }
38086@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
38087 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38088
38089 usbatm_pop(vcc, skb);
38090- atomic_inc(&vcc->stats->tx);
38091+ atomic_inc_unchecked(&vcc->stats->tx);
38092
38093 skb = skb_dequeue(&instance->sndqueue);
38094 }
38095@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
38096 if (!left--)
38097 return sprintf(page,
38098 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38099- atomic_read(&atm_dev->stats.aal5.tx),
38100- atomic_read(&atm_dev->stats.aal5.tx_err),
38101- atomic_read(&atm_dev->stats.aal5.rx),
38102- atomic_read(&atm_dev->stats.aal5.rx_err),
38103- atomic_read(&atm_dev->stats.aal5.rx_drop));
38104+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38105+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38106+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38107+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38108+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38109
38110 if (!left--) {
38111 if (instance->disconnected)
38112diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
38113index d956965..4179a77 100644
38114--- a/drivers/usb/core/devices.c
38115+++ b/drivers/usb/core/devices.c
38116@@ -126,7 +126,7 @@ static const char format_endpt[] =
38117 * time it gets called.
38118 */
38119 static struct device_connect_event {
38120- atomic_t count;
38121+ atomic_unchecked_t count;
38122 wait_queue_head_t wait;
38123 } device_event = {
38124 .count = ATOMIC_INIT(1),
38125@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
38126
38127 void usbfs_conn_disc_event(void)
38128 {
38129- atomic_add(2, &device_event.count);
38130+ atomic_add_unchecked(2, &device_event.count);
38131 wake_up(&device_event.wait);
38132 }
38133
38134@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
38135
38136 poll_wait(file, &device_event.wait, wait);
38137
38138- event_count = atomic_read(&device_event.count);
38139+ event_count = atomic_read_unchecked(&device_event.count);
38140 if (file->f_version != event_count) {
38141 file->f_version = event_count;
38142 return POLLIN | POLLRDNORM;
38143diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
38144index 1fc8f12..20647c1 100644
38145--- a/drivers/usb/early/ehci-dbgp.c
38146+++ b/drivers/usb/early/ehci-dbgp.c
38147@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
38148
38149 #ifdef CONFIG_KGDB
38150 static struct kgdb_io kgdbdbgp_io_ops;
38151-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
38152+static struct kgdb_io kgdbdbgp_io_ops_console;
38153+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
38154 #else
38155 #define dbgp_kgdb_mode (0)
38156 #endif
38157@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
38158 .write_char = kgdbdbgp_write_char,
38159 };
38160
38161+static struct kgdb_io kgdbdbgp_io_ops_console = {
38162+ .name = "kgdbdbgp",
38163+ .read_char = kgdbdbgp_read_char,
38164+ .write_char = kgdbdbgp_write_char,
38165+ .is_console = 1
38166+};
38167+
38168 static int kgdbdbgp_wait_time;
38169
38170 static int __init kgdbdbgp_parse_config(char *str)
38171@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
38172 ptr++;
38173 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
38174 }
38175- kgdb_register_io_module(&kgdbdbgp_io_ops);
38176- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
38177+ if (early_dbgp_console.index != -1)
38178+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
38179+ else
38180+ kgdb_register_io_module(&kgdbdbgp_io_ops);
38181
38182 return 0;
38183 }
38184diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
38185index d6bea3e..60b250e 100644
38186--- a/drivers/usb/wusbcore/wa-hc.h
38187+++ b/drivers/usb/wusbcore/wa-hc.h
38188@@ -192,7 +192,7 @@ struct wahc {
38189 struct list_head xfer_delayed_list;
38190 spinlock_t xfer_list_lock;
38191 struct work_struct xfer_work;
38192- atomic_t xfer_id_count;
38193+ atomic_unchecked_t xfer_id_count;
38194 };
38195
38196
38197@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
38198 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38199 spin_lock_init(&wa->xfer_list_lock);
38200 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38201- atomic_set(&wa->xfer_id_count, 1);
38202+ atomic_set_unchecked(&wa->xfer_id_count, 1);
38203 }
38204
38205 /**
38206diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
38207index 57c01ab..8a05959 100644
38208--- a/drivers/usb/wusbcore/wa-xfer.c
38209+++ b/drivers/usb/wusbcore/wa-xfer.c
38210@@ -296,7 +296,7 @@ out:
38211 */
38212 static void wa_xfer_id_init(struct wa_xfer *xfer)
38213 {
38214- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38215+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38216 }
38217
38218 /*
38219diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
38220index 51e4c1e..9d87e2a 100644
38221--- a/drivers/vhost/vhost.c
38222+++ b/drivers/vhost/vhost.c
38223@@ -632,7 +632,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
38224 return 0;
38225 }
38226
38227-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
38228+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
38229 {
38230 struct file *eventfp, *filep = NULL,
38231 *pollstart = NULL, *pollstop = NULL;
38232diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
38233index b0b2ac3..89a4399 100644
38234--- a/drivers/video/aty/aty128fb.c
38235+++ b/drivers/video/aty/aty128fb.c
38236@@ -148,7 +148,7 @@ enum {
38237 };
38238
38239 /* Must match above enum */
38240-static const char *r128_family[] __devinitdata = {
38241+static const char *r128_family[] __devinitconst = {
38242 "AGP",
38243 "PCI",
38244 "PRO AGP",
38245diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
38246index 5c3960d..15cf8fc 100644
38247--- a/drivers/video/fbcmap.c
38248+++ b/drivers/video/fbcmap.c
38249@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
38250 rc = -ENODEV;
38251 goto out;
38252 }
38253- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38254- !info->fbops->fb_setcmap)) {
38255+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38256 rc = -EINVAL;
38257 goto out1;
38258 }
38259diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
38260index c6ce416..3b9b642 100644
38261--- a/drivers/video/fbmem.c
38262+++ b/drivers/video/fbmem.c
38263@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38264 image->dx += image->width + 8;
38265 }
38266 } else if (rotate == FB_ROTATE_UD) {
38267- for (x = 0; x < num && image->dx >= 0; x++) {
38268+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38269 info->fbops->fb_imageblit(info, image);
38270 image->dx -= image->width + 8;
38271 }
38272@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38273 image->dy += image->height + 8;
38274 }
38275 } else if (rotate == FB_ROTATE_CCW) {
38276- for (x = 0; x < num && image->dy >= 0; x++) {
38277+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38278 info->fbops->fb_imageblit(info, image);
38279 image->dy -= image->height + 8;
38280 }
38281@@ -1157,7 +1157,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
38282 return -EFAULT;
38283 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38284 return -EINVAL;
38285- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38286+ if (con2fb.framebuffer >= FB_MAX)
38287 return -EINVAL;
38288 if (!registered_fb[con2fb.framebuffer])
38289 request_module("fb%d", con2fb.framebuffer);
38290diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
38291index 5a5d092..265c5ed 100644
38292--- a/drivers/video/geode/gx1fb_core.c
38293+++ b/drivers/video/geode/gx1fb_core.c
38294@@ -29,7 +29,7 @@ static int crt_option = 1;
38295 static char panel_option[32] = "";
38296
38297 /* Modes relevant to the GX1 (taken from modedb.c) */
38298-static const struct fb_videomode __devinitdata gx1_modedb[] = {
38299+static const struct fb_videomode __devinitconst gx1_modedb[] = {
38300 /* 640x480-60 VESA */
38301 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
38302 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
38303diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
38304index 0fad23f..0e9afa4 100644
38305--- a/drivers/video/gxt4500.c
38306+++ b/drivers/video/gxt4500.c
38307@@ -156,7 +156,7 @@ struct gxt4500_par {
38308 static char *mode_option;
38309
38310 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
38311-static const struct fb_videomode defaultmode __devinitdata = {
38312+static const struct fb_videomode defaultmode __devinitconst = {
38313 .refresh = 60,
38314 .xres = 1280,
38315 .yres = 1024,
38316@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
38317 return 0;
38318 }
38319
38320-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
38321+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
38322 .id = "IBM GXT4500P",
38323 .type = FB_TYPE_PACKED_PIXELS,
38324 .visual = FB_VISUAL_PSEUDOCOLOR,
38325diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
38326index 7672d2e..b56437f 100644
38327--- a/drivers/video/i810/i810_accel.c
38328+++ b/drivers/video/i810/i810_accel.c
38329@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
38330 }
38331 }
38332 printk("ringbuffer lockup!!!\n");
38333+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38334 i810_report_error(mmio);
38335 par->dev_flags |= LOCKUP;
38336 info->pixmap.scan_align = 1;
38337diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
38338index b83f361..2b05a91 100644
38339--- a/drivers/video/i810/i810_main.c
38340+++ b/drivers/video/i810/i810_main.c
38341@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
38342 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
38343
38344 /* PCI */
38345-static const char *i810_pci_list[] __devinitdata = {
38346+static const char *i810_pci_list[] __devinitconst = {
38347 "Intel(R) 810 Framebuffer Device" ,
38348 "Intel(R) 810-DC100 Framebuffer Device" ,
38349 "Intel(R) 810E Framebuffer Device" ,
38350diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
38351index de36693..3c63fc2 100644
38352--- a/drivers/video/jz4740_fb.c
38353+++ b/drivers/video/jz4740_fb.c
38354@@ -136,7 +136,7 @@ struct jzfb {
38355 uint32_t pseudo_palette[16];
38356 };
38357
38358-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
38359+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
38360 .id = "JZ4740 FB",
38361 .type = FB_TYPE_PACKED_PIXELS,
38362 .visual = FB_VISUAL_TRUECOLOR,
38363diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
38364index 3c14e43..eafa544 100644
38365--- a/drivers/video/logo/logo_linux_clut224.ppm
38366+++ b/drivers/video/logo/logo_linux_clut224.ppm
38367@@ -1,1604 +1,1123 @@
38368 P3
38369-# Standard 224-color Linux logo
38370 80 80
38371 255
38372- 0 0 0 0 0 0 0 0 0 0 0 0
38373- 0 0 0 0 0 0 0 0 0 0 0 0
38374- 0 0 0 0 0 0 0 0 0 0 0 0
38375- 0 0 0 0 0 0 0 0 0 0 0 0
38376- 0 0 0 0 0 0 0 0 0 0 0 0
38377- 0 0 0 0 0 0 0 0 0 0 0 0
38378- 0 0 0 0 0 0 0 0 0 0 0 0
38379- 0 0 0 0 0 0 0 0 0 0 0 0
38380- 0 0 0 0 0 0 0 0 0 0 0 0
38381- 6 6 6 6 6 6 10 10 10 10 10 10
38382- 10 10 10 6 6 6 6 6 6 6 6 6
38383- 0 0 0 0 0 0 0 0 0 0 0 0
38384- 0 0 0 0 0 0 0 0 0 0 0 0
38385- 0 0 0 0 0 0 0 0 0 0 0 0
38386- 0 0 0 0 0 0 0 0 0 0 0 0
38387- 0 0 0 0 0 0 0 0 0 0 0 0
38388- 0 0 0 0 0 0 0 0 0 0 0 0
38389- 0 0 0 0 0 0 0 0 0 0 0 0
38390- 0 0 0 0 0 0 0 0 0 0 0 0
38391- 0 0 0 0 0 0 0 0 0 0 0 0
38392- 0 0 0 0 0 0 0 0 0 0 0 0
38393- 0 0 0 0 0 0 0 0 0 0 0 0
38394- 0 0 0 0 0 0 0 0 0 0 0 0
38395- 0 0 0 0 0 0 0 0 0 0 0 0
38396- 0 0 0 0 0 0 0 0 0 0 0 0
38397- 0 0 0 0 0 0 0 0 0 0 0 0
38398- 0 0 0 0 0 0 0 0 0 0 0 0
38399- 0 0 0 0 0 0 0 0 0 0 0 0
38400- 0 0 0 6 6 6 10 10 10 14 14 14
38401- 22 22 22 26 26 26 30 30 30 34 34 34
38402- 30 30 30 30 30 30 26 26 26 18 18 18
38403- 14 14 14 10 10 10 6 6 6 0 0 0
38404- 0 0 0 0 0 0 0 0 0 0 0 0
38405- 0 0 0 0 0 0 0 0 0 0 0 0
38406- 0 0 0 0 0 0 0 0 0 0 0 0
38407- 0 0 0 0 0 0 0 0 0 0 0 0
38408- 0 0 0 0 0 0 0 0 0 0 0 0
38409- 0 0 0 0 0 0 0 0 0 0 0 0
38410- 0 0 0 0 0 0 0 0 0 0 0 0
38411- 0 0 0 0 0 0 0 0 0 0 0 0
38412- 0 0 0 0 0 0 0 0 0 0 0 0
38413- 0 0 0 0 0 1 0 0 1 0 0 0
38414- 0 0 0 0 0 0 0 0 0 0 0 0
38415- 0 0 0 0 0 0 0 0 0 0 0 0
38416- 0 0 0 0 0 0 0 0 0 0 0 0
38417- 0 0 0 0 0 0 0 0 0 0 0 0
38418- 0 0 0 0 0 0 0 0 0 0 0 0
38419- 0 0 0 0 0 0 0 0 0 0 0 0
38420- 6 6 6 14 14 14 26 26 26 42 42 42
38421- 54 54 54 66 66 66 78 78 78 78 78 78
38422- 78 78 78 74 74 74 66 66 66 54 54 54
38423- 42 42 42 26 26 26 18 18 18 10 10 10
38424- 6 6 6 0 0 0 0 0 0 0 0 0
38425- 0 0 0 0 0 0 0 0 0 0 0 0
38426- 0 0 0 0 0 0 0 0 0 0 0 0
38427- 0 0 0 0 0 0 0 0 0 0 0 0
38428- 0 0 0 0 0 0 0 0 0 0 0 0
38429- 0 0 0 0 0 0 0 0 0 0 0 0
38430- 0 0 0 0 0 0 0 0 0 0 0 0
38431- 0 0 0 0 0 0 0 0 0 0 0 0
38432- 0 0 0 0 0 0 0 0 0 0 0 0
38433- 0 0 1 0 0 0 0 0 0 0 0 0
38434- 0 0 0 0 0 0 0 0 0 0 0 0
38435- 0 0 0 0 0 0 0 0 0 0 0 0
38436- 0 0 0 0 0 0 0 0 0 0 0 0
38437- 0 0 0 0 0 0 0 0 0 0 0 0
38438- 0 0 0 0 0 0 0 0 0 0 0 0
38439- 0 0 0 0 0 0 0 0 0 10 10 10
38440- 22 22 22 42 42 42 66 66 66 86 86 86
38441- 66 66 66 38 38 38 38 38 38 22 22 22
38442- 26 26 26 34 34 34 54 54 54 66 66 66
38443- 86 86 86 70 70 70 46 46 46 26 26 26
38444- 14 14 14 6 6 6 0 0 0 0 0 0
38445- 0 0 0 0 0 0 0 0 0 0 0 0
38446- 0 0 0 0 0 0 0 0 0 0 0 0
38447- 0 0 0 0 0 0 0 0 0 0 0 0
38448- 0 0 0 0 0 0 0 0 0 0 0 0
38449- 0 0 0 0 0 0 0 0 0 0 0 0
38450- 0 0 0 0 0 0 0 0 0 0 0 0
38451- 0 0 0 0 0 0 0 0 0 0 0 0
38452- 0 0 0 0 0 0 0 0 0 0 0 0
38453- 0 0 1 0 0 1 0 0 1 0 0 0
38454- 0 0 0 0 0 0 0 0 0 0 0 0
38455- 0 0 0 0 0 0 0 0 0 0 0 0
38456- 0 0 0 0 0 0 0 0 0 0 0 0
38457- 0 0 0 0 0 0 0 0 0 0 0 0
38458- 0 0 0 0 0 0 0 0 0 0 0 0
38459- 0 0 0 0 0 0 10 10 10 26 26 26
38460- 50 50 50 82 82 82 58 58 58 6 6 6
38461- 2 2 6 2 2 6 2 2 6 2 2 6
38462- 2 2 6 2 2 6 2 2 6 2 2 6
38463- 6 6 6 54 54 54 86 86 86 66 66 66
38464- 38 38 38 18 18 18 6 6 6 0 0 0
38465- 0 0 0 0 0 0 0 0 0 0 0 0
38466- 0 0 0 0 0 0 0 0 0 0 0 0
38467- 0 0 0 0 0 0 0 0 0 0 0 0
38468- 0 0 0 0 0 0 0 0 0 0 0 0
38469- 0 0 0 0 0 0 0 0 0 0 0 0
38470- 0 0 0 0 0 0 0 0 0 0 0 0
38471- 0 0 0 0 0 0 0 0 0 0 0 0
38472- 0 0 0 0 0 0 0 0 0 0 0 0
38473- 0 0 0 0 0 0 0 0 0 0 0 0
38474- 0 0 0 0 0 0 0 0 0 0 0 0
38475- 0 0 0 0 0 0 0 0 0 0 0 0
38476- 0 0 0 0 0 0 0 0 0 0 0 0
38477- 0 0 0 0 0 0 0 0 0 0 0 0
38478- 0 0 0 0 0 0 0 0 0 0 0 0
38479- 0 0 0 6 6 6 22 22 22 50 50 50
38480- 78 78 78 34 34 34 2 2 6 2 2 6
38481- 2 2 6 2 2 6 2 2 6 2 2 6
38482- 2 2 6 2 2 6 2 2 6 2 2 6
38483- 2 2 6 2 2 6 6 6 6 70 70 70
38484- 78 78 78 46 46 46 22 22 22 6 6 6
38485- 0 0 0 0 0 0 0 0 0 0 0 0
38486- 0 0 0 0 0 0 0 0 0 0 0 0
38487- 0 0 0 0 0 0 0 0 0 0 0 0
38488- 0 0 0 0 0 0 0 0 0 0 0 0
38489- 0 0 0 0 0 0 0 0 0 0 0 0
38490- 0 0 0 0 0 0 0 0 0 0 0 0
38491- 0 0 0 0 0 0 0 0 0 0 0 0
38492- 0 0 0 0 0 0 0 0 0 0 0 0
38493- 0 0 1 0 0 1 0 0 1 0 0 0
38494- 0 0 0 0 0 0 0 0 0 0 0 0
38495- 0 0 0 0 0 0 0 0 0 0 0 0
38496- 0 0 0 0 0 0 0 0 0 0 0 0
38497- 0 0 0 0 0 0 0 0 0 0 0 0
38498- 0 0 0 0 0 0 0 0 0 0 0 0
38499- 6 6 6 18 18 18 42 42 42 82 82 82
38500- 26 26 26 2 2 6 2 2 6 2 2 6
38501- 2 2 6 2 2 6 2 2 6 2 2 6
38502- 2 2 6 2 2 6 2 2 6 14 14 14
38503- 46 46 46 34 34 34 6 6 6 2 2 6
38504- 42 42 42 78 78 78 42 42 42 18 18 18
38505- 6 6 6 0 0 0 0 0 0 0 0 0
38506- 0 0 0 0 0 0 0 0 0 0 0 0
38507- 0 0 0 0 0 0 0 0 0 0 0 0
38508- 0 0 0 0 0 0 0 0 0 0 0 0
38509- 0 0 0 0 0 0 0 0 0 0 0 0
38510- 0 0 0 0 0 0 0 0 0 0 0 0
38511- 0 0 0 0 0 0 0 0 0 0 0 0
38512- 0 0 0 0 0 0 0 0 0 0 0 0
38513- 0 0 1 0 0 0 0 0 1 0 0 0
38514- 0 0 0 0 0 0 0 0 0 0 0 0
38515- 0 0 0 0 0 0 0 0 0 0 0 0
38516- 0 0 0 0 0 0 0 0 0 0 0 0
38517- 0 0 0 0 0 0 0 0 0 0 0 0
38518- 0 0 0 0 0 0 0 0 0 0 0 0
38519- 10 10 10 30 30 30 66 66 66 58 58 58
38520- 2 2 6 2 2 6 2 2 6 2 2 6
38521- 2 2 6 2 2 6 2 2 6 2 2 6
38522- 2 2 6 2 2 6 2 2 6 26 26 26
38523- 86 86 86 101 101 101 46 46 46 10 10 10
38524- 2 2 6 58 58 58 70 70 70 34 34 34
38525- 10 10 10 0 0 0 0 0 0 0 0 0
38526- 0 0 0 0 0 0 0 0 0 0 0 0
38527- 0 0 0 0 0 0 0 0 0 0 0 0
38528- 0 0 0 0 0 0 0 0 0 0 0 0
38529- 0 0 0 0 0 0 0 0 0 0 0 0
38530- 0 0 0 0 0 0 0 0 0 0 0 0
38531- 0 0 0 0 0 0 0 0 0 0 0 0
38532- 0 0 0 0 0 0 0 0 0 0 0 0
38533- 0 0 1 0 0 1 0 0 1 0 0 0
38534- 0 0 0 0 0 0 0 0 0 0 0 0
38535- 0 0 0 0 0 0 0 0 0 0 0 0
38536- 0 0 0 0 0 0 0 0 0 0 0 0
38537- 0 0 0 0 0 0 0 0 0 0 0 0
38538- 0 0 0 0 0 0 0 0 0 0 0 0
38539- 14 14 14 42 42 42 86 86 86 10 10 10
38540- 2 2 6 2 2 6 2 2 6 2 2 6
38541- 2 2 6 2 2 6 2 2 6 2 2 6
38542- 2 2 6 2 2 6 2 2 6 30 30 30
38543- 94 94 94 94 94 94 58 58 58 26 26 26
38544- 2 2 6 6 6 6 78 78 78 54 54 54
38545- 22 22 22 6 6 6 0 0 0 0 0 0
38546- 0 0 0 0 0 0 0 0 0 0 0 0
38547- 0 0 0 0 0 0 0 0 0 0 0 0
38548- 0 0 0 0 0 0 0 0 0 0 0 0
38549- 0 0 0 0 0 0 0 0 0 0 0 0
38550- 0 0 0 0 0 0 0 0 0 0 0 0
38551- 0 0 0 0 0 0 0 0 0 0 0 0
38552- 0 0 0 0 0 0 0 0 0 0 0 0
38553- 0 0 0 0 0 0 0 0 0 0 0 0
38554- 0 0 0 0 0 0 0 0 0 0 0 0
38555- 0 0 0 0 0 0 0 0 0 0 0 0
38556- 0 0 0 0 0 0 0 0 0 0 0 0
38557- 0 0 0 0 0 0 0 0 0 0 0 0
38558- 0 0 0 0 0 0 0 0 0 6 6 6
38559- 22 22 22 62 62 62 62 62 62 2 2 6
38560- 2 2 6 2 2 6 2 2 6 2 2 6
38561- 2 2 6 2 2 6 2 2 6 2 2 6
38562- 2 2 6 2 2 6 2 2 6 26 26 26
38563- 54 54 54 38 38 38 18 18 18 10 10 10
38564- 2 2 6 2 2 6 34 34 34 82 82 82
38565- 38 38 38 14 14 14 0 0 0 0 0 0
38566- 0 0 0 0 0 0 0 0 0 0 0 0
38567- 0 0 0 0 0 0 0 0 0 0 0 0
38568- 0 0 0 0 0 0 0 0 0 0 0 0
38569- 0 0 0 0 0 0 0 0 0 0 0 0
38570- 0 0 0 0 0 0 0 0 0 0 0 0
38571- 0 0 0 0 0 0 0 0 0 0 0 0
38572- 0 0 0 0 0 0 0 0 0 0 0 0
38573- 0 0 0 0 0 1 0 0 1 0 0 0
38574- 0 0 0 0 0 0 0 0 0 0 0 0
38575- 0 0 0 0 0 0 0 0 0 0 0 0
38576- 0 0 0 0 0 0 0 0 0 0 0 0
38577- 0 0 0 0 0 0 0 0 0 0 0 0
38578- 0 0 0 0 0 0 0 0 0 6 6 6
38579- 30 30 30 78 78 78 30 30 30 2 2 6
38580- 2 2 6 2 2 6 2 2 6 2 2 6
38581- 2 2 6 2 2 6 2 2 6 2 2 6
38582- 2 2 6 2 2 6 2 2 6 10 10 10
38583- 10 10 10 2 2 6 2 2 6 2 2 6
38584- 2 2 6 2 2 6 2 2 6 78 78 78
38585- 50 50 50 18 18 18 6 6 6 0 0 0
38586- 0 0 0 0 0 0 0 0 0 0 0 0
38587- 0 0 0 0 0 0 0 0 0 0 0 0
38588- 0 0 0 0 0 0 0 0 0 0 0 0
38589- 0 0 0 0 0 0 0 0 0 0 0 0
38590- 0 0 0 0 0 0 0 0 0 0 0 0
38591- 0 0 0 0 0 0 0 0 0 0 0 0
38592- 0 0 0 0 0 0 0 0 0 0 0 0
38593- 0 0 1 0 0 0 0 0 0 0 0 0
38594- 0 0 0 0 0 0 0 0 0 0 0 0
38595- 0 0 0 0 0 0 0 0 0 0 0 0
38596- 0 0 0 0 0 0 0 0 0 0 0 0
38597- 0 0 0 0 0 0 0 0 0 0 0 0
38598- 0 0 0 0 0 0 0 0 0 10 10 10
38599- 38 38 38 86 86 86 14 14 14 2 2 6
38600- 2 2 6 2 2 6 2 2 6 2 2 6
38601- 2 2 6 2 2 6 2 2 6 2 2 6
38602- 2 2 6 2 2 6 2 2 6 2 2 6
38603- 2 2 6 2 2 6 2 2 6 2 2 6
38604- 2 2 6 2 2 6 2 2 6 54 54 54
38605- 66 66 66 26 26 26 6 6 6 0 0 0
38606- 0 0 0 0 0 0 0 0 0 0 0 0
38607- 0 0 0 0 0 0 0 0 0 0 0 0
38608- 0 0 0 0 0 0 0 0 0 0 0 0
38609- 0 0 0 0 0 0 0 0 0 0 0 0
38610- 0 0 0 0 0 0 0 0 0 0 0 0
38611- 0 0 0 0 0 0 0 0 0 0 0 0
38612- 0 0 0 0 0 0 0 0 0 0 0 0
38613- 0 0 0 0 0 1 0 0 1 0 0 0
38614- 0 0 0 0 0 0 0 0 0 0 0 0
38615- 0 0 0 0 0 0 0 0 0 0 0 0
38616- 0 0 0 0 0 0 0 0 0 0 0 0
38617- 0 0 0 0 0 0 0 0 0 0 0 0
38618- 0 0 0 0 0 0 0 0 0 14 14 14
38619- 42 42 42 82 82 82 2 2 6 2 2 6
38620- 2 2 6 6 6 6 10 10 10 2 2 6
38621- 2 2 6 2 2 6 2 2 6 2 2 6
38622- 2 2 6 2 2 6 2 2 6 6 6 6
38623- 14 14 14 10 10 10 2 2 6 2 2 6
38624- 2 2 6 2 2 6 2 2 6 18 18 18
38625- 82 82 82 34 34 34 10 10 10 0 0 0
38626- 0 0 0 0 0 0 0 0 0 0 0 0
38627- 0 0 0 0 0 0 0 0 0 0 0 0
38628- 0 0 0 0 0 0 0 0 0 0 0 0
38629- 0 0 0 0 0 0 0 0 0 0 0 0
38630- 0 0 0 0 0 0 0 0 0 0 0 0
38631- 0 0 0 0 0 0 0 0 0 0 0 0
38632- 0 0 0 0 0 0 0 0 0 0 0 0
38633- 0 0 1 0 0 0 0 0 0 0 0 0
38634- 0 0 0 0 0 0 0 0 0 0 0 0
38635- 0 0 0 0 0 0 0 0 0 0 0 0
38636- 0 0 0 0 0 0 0 0 0 0 0 0
38637- 0 0 0 0 0 0 0 0 0 0 0 0
38638- 0 0 0 0 0 0 0 0 0 14 14 14
38639- 46 46 46 86 86 86 2 2 6 2 2 6
38640- 6 6 6 6 6 6 22 22 22 34 34 34
38641- 6 6 6 2 2 6 2 2 6 2 2 6
38642- 2 2 6 2 2 6 18 18 18 34 34 34
38643- 10 10 10 50 50 50 22 22 22 2 2 6
38644- 2 2 6 2 2 6 2 2 6 10 10 10
38645- 86 86 86 42 42 42 14 14 14 0 0 0
38646- 0 0 0 0 0 0 0 0 0 0 0 0
38647- 0 0 0 0 0 0 0 0 0 0 0 0
38648- 0 0 0 0 0 0 0 0 0 0 0 0
38649- 0 0 0 0 0 0 0 0 0 0 0 0
38650- 0 0 0 0 0 0 0 0 0 0 0 0
38651- 0 0 0 0 0 0 0 0 0 0 0 0
38652- 0 0 0 0 0 0 0 0 0 0 0 0
38653- 0 0 1 0 0 1 0 0 1 0 0 0
38654- 0 0 0 0 0 0 0 0 0 0 0 0
38655- 0 0 0 0 0 0 0 0 0 0 0 0
38656- 0 0 0 0 0 0 0 0 0 0 0 0
38657- 0 0 0 0 0 0 0 0 0 0 0 0
38658- 0 0 0 0 0 0 0 0 0 14 14 14
38659- 46 46 46 86 86 86 2 2 6 2 2 6
38660- 38 38 38 116 116 116 94 94 94 22 22 22
38661- 22 22 22 2 2 6 2 2 6 2 2 6
38662- 14 14 14 86 86 86 138 138 138 162 162 162
38663-154 154 154 38 38 38 26 26 26 6 6 6
38664- 2 2 6 2 2 6 2 2 6 2 2 6
38665- 86 86 86 46 46 46 14 14 14 0 0 0
38666- 0 0 0 0 0 0 0 0 0 0 0 0
38667- 0 0 0 0 0 0 0 0 0 0 0 0
38668- 0 0 0 0 0 0 0 0 0 0 0 0
38669- 0 0 0 0 0 0 0 0 0 0 0 0
38670- 0 0 0 0 0 0 0 0 0 0 0 0
38671- 0 0 0 0 0 0 0 0 0 0 0 0
38672- 0 0 0 0 0 0 0 0 0 0 0 0
38673- 0 0 0 0 0 0 0 0 0 0 0 0
38674- 0 0 0 0 0 0 0 0 0 0 0 0
38675- 0 0 0 0 0 0 0 0 0 0 0 0
38676- 0 0 0 0 0 0 0 0 0 0 0 0
38677- 0 0 0 0 0 0 0 0 0 0 0 0
38678- 0 0 0 0 0 0 0 0 0 14 14 14
38679- 46 46 46 86 86 86 2 2 6 14 14 14
38680-134 134 134 198 198 198 195 195 195 116 116 116
38681- 10 10 10 2 2 6 2 2 6 6 6 6
38682-101 98 89 187 187 187 210 210 210 218 218 218
38683-214 214 214 134 134 134 14 14 14 6 6 6
38684- 2 2 6 2 2 6 2 2 6 2 2 6
38685- 86 86 86 50 50 50 18 18 18 6 6 6
38686- 0 0 0 0 0 0 0 0 0 0 0 0
38687- 0 0 0 0 0 0 0 0 0 0 0 0
38688- 0 0 0 0 0 0 0 0 0 0 0 0
38689- 0 0 0 0 0 0 0 0 0 0 0 0
38690- 0 0 0 0 0 0 0 0 0 0 0 0
38691- 0 0 0 0 0 0 0 0 0 0 0 0
38692- 0 0 0 0 0 0 0 0 1 0 0 0
38693- 0 0 1 0 0 1 0 0 1 0 0 0
38694- 0 0 0 0 0 0 0 0 0 0 0 0
38695- 0 0 0 0 0 0 0 0 0 0 0 0
38696- 0 0 0 0 0 0 0 0 0 0 0 0
38697- 0 0 0 0 0 0 0 0 0 0 0 0
38698- 0 0 0 0 0 0 0 0 0 14 14 14
38699- 46 46 46 86 86 86 2 2 6 54 54 54
38700-218 218 218 195 195 195 226 226 226 246 246 246
38701- 58 58 58 2 2 6 2 2 6 30 30 30
38702-210 210 210 253 253 253 174 174 174 123 123 123
38703-221 221 221 234 234 234 74 74 74 2 2 6
38704- 2 2 6 2 2 6 2 2 6 2 2 6
38705- 70 70 70 58 58 58 22 22 22 6 6 6
38706- 0 0 0 0 0 0 0 0 0 0 0 0
38707- 0 0 0 0 0 0 0 0 0 0 0 0
38708- 0 0 0 0 0 0 0 0 0 0 0 0
38709- 0 0 0 0 0 0 0 0 0 0 0 0
38710- 0 0 0 0 0 0 0 0 0 0 0 0
38711- 0 0 0 0 0 0 0 0 0 0 0 0
38712- 0 0 0 0 0 0 0 0 0 0 0 0
38713- 0 0 0 0 0 0 0 0 0 0 0 0
38714- 0 0 0 0 0 0 0 0 0 0 0 0
38715- 0 0 0 0 0 0 0 0 0 0 0 0
38716- 0 0 0 0 0 0 0 0 0 0 0 0
38717- 0 0 0 0 0 0 0 0 0 0 0 0
38718- 0 0 0 0 0 0 0 0 0 14 14 14
38719- 46 46 46 82 82 82 2 2 6 106 106 106
38720-170 170 170 26 26 26 86 86 86 226 226 226
38721-123 123 123 10 10 10 14 14 14 46 46 46
38722-231 231 231 190 190 190 6 6 6 70 70 70
38723- 90 90 90 238 238 238 158 158 158 2 2 6
38724- 2 2 6 2 2 6 2 2 6 2 2 6
38725- 70 70 70 58 58 58 22 22 22 6 6 6
38726- 0 0 0 0 0 0 0 0 0 0 0 0
38727- 0 0 0 0 0 0 0 0 0 0 0 0
38728- 0 0 0 0 0 0 0 0 0 0 0 0
38729- 0 0 0 0 0 0 0 0 0 0 0 0
38730- 0 0 0 0 0 0 0 0 0 0 0 0
38731- 0 0 0 0 0 0 0 0 0 0 0 0
38732- 0 0 0 0 0 0 0 0 1 0 0 0
38733- 0 0 1 0 0 1 0 0 1 0 0 0
38734- 0 0 0 0 0 0 0 0 0 0 0 0
38735- 0 0 0 0 0 0 0 0 0 0 0 0
38736- 0 0 0 0 0 0 0 0 0 0 0 0
38737- 0 0 0 0 0 0 0 0 0 0 0 0
38738- 0 0 0 0 0 0 0 0 0 14 14 14
38739- 42 42 42 86 86 86 6 6 6 116 116 116
38740-106 106 106 6 6 6 70 70 70 149 149 149
38741-128 128 128 18 18 18 38 38 38 54 54 54
38742-221 221 221 106 106 106 2 2 6 14 14 14
38743- 46 46 46 190 190 190 198 198 198 2 2 6
38744- 2 2 6 2 2 6 2 2 6 2 2 6
38745- 74 74 74 62 62 62 22 22 22 6 6 6
38746- 0 0 0 0 0 0 0 0 0 0 0 0
38747- 0 0 0 0 0 0 0 0 0 0 0 0
38748- 0 0 0 0 0 0 0 0 0 0 0 0
38749- 0 0 0 0 0 0 0 0 0 0 0 0
38750- 0 0 0 0 0 0 0 0 0 0 0 0
38751- 0 0 0 0 0 0 0 0 0 0 0 0
38752- 0 0 0 0 0 0 0 0 1 0 0 0
38753- 0 0 1 0 0 0 0 0 1 0 0 0
38754- 0 0 0 0 0 0 0 0 0 0 0 0
38755- 0 0 0 0 0 0 0 0 0 0 0 0
38756- 0 0 0 0 0 0 0 0 0 0 0 0
38757- 0 0 0 0 0 0 0 0 0 0 0 0
38758- 0 0 0 0 0 0 0 0 0 14 14 14
38759- 42 42 42 94 94 94 14 14 14 101 101 101
38760-128 128 128 2 2 6 18 18 18 116 116 116
38761-118 98 46 121 92 8 121 92 8 98 78 10
38762-162 162 162 106 106 106 2 2 6 2 2 6
38763- 2 2 6 195 195 195 195 195 195 6 6 6
38764- 2 2 6 2 2 6 2 2 6 2 2 6
38765- 74 74 74 62 62 62 22 22 22 6 6 6
38766- 0 0 0 0 0 0 0 0 0 0 0 0
38767- 0 0 0 0 0 0 0 0 0 0 0 0
38768- 0 0 0 0 0 0 0 0 0 0 0 0
38769- 0 0 0 0 0 0 0 0 0 0 0 0
38770- 0 0 0 0 0 0 0 0 0 0 0 0
38771- 0 0 0 0 0 0 0 0 0 0 0 0
38772- 0 0 0 0 0 0 0 0 1 0 0 1
38773- 0 0 1 0 0 0 0 0 1 0 0 0
38774- 0 0 0 0 0 0 0 0 0 0 0 0
38775- 0 0 0 0 0 0 0 0 0 0 0 0
38776- 0 0 0 0 0 0 0 0 0 0 0 0
38777- 0 0 0 0 0 0 0 0 0 0 0 0
38778- 0 0 0 0 0 0 0 0 0 10 10 10
38779- 38 38 38 90 90 90 14 14 14 58 58 58
38780-210 210 210 26 26 26 54 38 6 154 114 10
38781-226 170 11 236 186 11 225 175 15 184 144 12
38782-215 174 15 175 146 61 37 26 9 2 2 6
38783- 70 70 70 246 246 246 138 138 138 2 2 6
38784- 2 2 6 2 2 6 2 2 6 2 2 6
38785- 70 70 70 66 66 66 26 26 26 6 6 6
38786- 0 0 0 0 0 0 0 0 0 0 0 0
38787- 0 0 0 0 0 0 0 0 0 0 0 0
38788- 0 0 0 0 0 0 0 0 0 0 0 0
38789- 0 0 0 0 0 0 0 0 0 0 0 0
38790- 0 0 0 0 0 0 0 0 0 0 0 0
38791- 0 0 0 0 0 0 0 0 0 0 0 0
38792- 0 0 0 0 0 0 0 0 0 0 0 0
38793- 0 0 0 0 0 0 0 0 0 0 0 0
38794- 0 0 0 0 0 0 0 0 0 0 0 0
38795- 0 0 0 0 0 0 0 0 0 0 0 0
38796- 0 0 0 0 0 0 0 0 0 0 0 0
38797- 0 0 0 0 0 0 0 0 0 0 0 0
38798- 0 0 0 0 0 0 0 0 0 10 10 10
38799- 38 38 38 86 86 86 14 14 14 10 10 10
38800-195 195 195 188 164 115 192 133 9 225 175 15
38801-239 182 13 234 190 10 232 195 16 232 200 30
38802-245 207 45 241 208 19 232 195 16 184 144 12
38803-218 194 134 211 206 186 42 42 42 2 2 6
38804- 2 2 6 2 2 6 2 2 6 2 2 6
38805- 50 50 50 74 74 74 30 30 30 6 6 6
38806- 0 0 0 0 0 0 0 0 0 0 0 0
38807- 0 0 0 0 0 0 0 0 0 0 0 0
38808- 0 0 0 0 0 0 0 0 0 0 0 0
38809- 0 0 0 0 0 0 0 0 0 0 0 0
38810- 0 0 0 0 0 0 0 0 0 0 0 0
38811- 0 0 0 0 0 0 0 0 0 0 0 0
38812- 0 0 0 0 0 0 0 0 0 0 0 0
38813- 0 0 0 0 0 0 0 0 0 0 0 0
38814- 0 0 0 0 0 0 0 0 0 0 0 0
38815- 0 0 0 0 0 0 0 0 0 0 0 0
38816- 0 0 0 0 0 0 0 0 0 0 0 0
38817- 0 0 0 0 0 0 0 0 0 0 0 0
38818- 0 0 0 0 0 0 0 0 0 10 10 10
38819- 34 34 34 86 86 86 14 14 14 2 2 6
38820-121 87 25 192 133 9 219 162 10 239 182 13
38821-236 186 11 232 195 16 241 208 19 244 214 54
38822-246 218 60 246 218 38 246 215 20 241 208 19
38823-241 208 19 226 184 13 121 87 25 2 2 6
38824- 2 2 6 2 2 6 2 2 6 2 2 6
38825- 50 50 50 82 82 82 34 34 34 10 10 10
38826- 0 0 0 0 0 0 0 0 0 0 0 0
38827- 0 0 0 0 0 0 0 0 0 0 0 0
38828- 0 0 0 0 0 0 0 0 0 0 0 0
38829- 0 0 0 0 0 0 0 0 0 0 0 0
38830- 0 0 0 0 0 0 0 0 0 0 0 0
38831- 0 0 0 0 0 0 0 0 0 0 0 0
38832- 0 0 0 0 0 0 0 0 0 0 0 0
38833- 0 0 0 0 0 0 0 0 0 0 0 0
38834- 0 0 0 0 0 0 0 0 0 0 0 0
38835- 0 0 0 0 0 0 0 0 0 0 0 0
38836- 0 0 0 0 0 0 0 0 0 0 0 0
38837- 0 0 0 0 0 0 0 0 0 0 0 0
38838- 0 0 0 0 0 0 0 0 0 10 10 10
38839- 34 34 34 82 82 82 30 30 30 61 42 6
38840-180 123 7 206 145 10 230 174 11 239 182 13
38841-234 190 10 238 202 15 241 208 19 246 218 74
38842-246 218 38 246 215 20 246 215 20 246 215 20
38843-226 184 13 215 174 15 184 144 12 6 6 6
38844- 2 2 6 2 2 6 2 2 6 2 2 6
38845- 26 26 26 94 94 94 42 42 42 14 14 14
38846- 0 0 0 0 0 0 0 0 0 0 0 0
38847- 0 0 0 0 0 0 0 0 0 0 0 0
38848- 0 0 0 0 0 0 0 0 0 0 0 0
38849- 0 0 0 0 0 0 0 0 0 0 0 0
38850- 0 0 0 0 0 0 0 0 0 0 0 0
38851- 0 0 0 0 0 0 0 0 0 0 0 0
38852- 0 0 0 0 0 0 0 0 0 0 0 0
38853- 0 0 0 0 0 0 0 0 0 0 0 0
38854- 0 0 0 0 0 0 0 0 0 0 0 0
38855- 0 0 0 0 0 0 0 0 0 0 0 0
38856- 0 0 0 0 0 0 0 0 0 0 0 0
38857- 0 0 0 0 0 0 0 0 0 0 0 0
38858- 0 0 0 0 0 0 0 0 0 10 10 10
38859- 30 30 30 78 78 78 50 50 50 104 69 6
38860-192 133 9 216 158 10 236 178 12 236 186 11
38861-232 195 16 241 208 19 244 214 54 245 215 43
38862-246 215 20 246 215 20 241 208 19 198 155 10
38863-200 144 11 216 158 10 156 118 10 2 2 6
38864- 2 2 6 2 2 6 2 2 6 2 2 6
38865- 6 6 6 90 90 90 54 54 54 18 18 18
38866- 6 6 6 0 0 0 0 0 0 0 0 0
38867- 0 0 0 0 0 0 0 0 0 0 0 0
38868- 0 0 0 0 0 0 0 0 0 0 0 0
38869- 0 0 0 0 0 0 0 0 0 0 0 0
38870- 0 0 0 0 0 0 0 0 0 0 0 0
38871- 0 0 0 0 0 0 0 0 0 0 0 0
38872- 0 0 0 0 0 0 0 0 0 0 0 0
38873- 0 0 0 0 0 0 0 0 0 0 0 0
38874- 0 0 0 0 0 0 0 0 0 0 0 0
38875- 0 0 0 0 0 0 0 0 0 0 0 0
38876- 0 0 0 0 0 0 0 0 0 0 0 0
38877- 0 0 0 0 0 0 0 0 0 0 0 0
38878- 0 0 0 0 0 0 0 0 0 10 10 10
38879- 30 30 30 78 78 78 46 46 46 22 22 22
38880-137 92 6 210 162 10 239 182 13 238 190 10
38881-238 202 15 241 208 19 246 215 20 246 215 20
38882-241 208 19 203 166 17 185 133 11 210 150 10
38883-216 158 10 210 150 10 102 78 10 2 2 6
38884- 6 6 6 54 54 54 14 14 14 2 2 6
38885- 2 2 6 62 62 62 74 74 74 30 30 30
38886- 10 10 10 0 0 0 0 0 0 0 0 0
38887- 0 0 0 0 0 0 0 0 0 0 0 0
38888- 0 0 0 0 0 0 0 0 0 0 0 0
38889- 0 0 0 0 0 0 0 0 0 0 0 0
38890- 0 0 0 0 0 0 0 0 0 0 0 0
38891- 0 0 0 0 0 0 0 0 0 0 0 0
38892- 0 0 0 0 0 0 0 0 0 0 0 0
38893- 0 0 0 0 0 0 0 0 0 0 0 0
38894- 0 0 0 0 0 0 0 0 0 0 0 0
38895- 0 0 0 0 0 0 0 0 0 0 0 0
38896- 0 0 0 0 0 0 0 0 0 0 0 0
38897- 0 0 0 0 0 0 0 0 0 0 0 0
38898- 0 0 0 0 0 0 0 0 0 10 10 10
38899- 34 34 34 78 78 78 50 50 50 6 6 6
38900- 94 70 30 139 102 15 190 146 13 226 184 13
38901-232 200 30 232 195 16 215 174 15 190 146 13
38902-168 122 10 192 133 9 210 150 10 213 154 11
38903-202 150 34 182 157 106 101 98 89 2 2 6
38904- 2 2 6 78 78 78 116 116 116 58 58 58
38905- 2 2 6 22 22 22 90 90 90 46 46 46
38906- 18 18 18 6 6 6 0 0 0 0 0 0
38907- 0 0 0 0 0 0 0 0 0 0 0 0
38908- 0 0 0 0 0 0 0 0 0 0 0 0
38909- 0 0 0 0 0 0 0 0 0 0 0 0
38910- 0 0 0 0 0 0 0 0 0 0 0 0
38911- 0 0 0 0 0 0 0 0 0 0 0 0
38912- 0 0 0 0 0 0 0 0 0 0 0 0
38913- 0 0 0 0 0 0 0 0 0 0 0 0
38914- 0 0 0 0 0 0 0 0 0 0 0 0
38915- 0 0 0 0 0 0 0 0 0 0 0 0
38916- 0 0 0 0 0 0 0 0 0 0 0 0
38917- 0 0 0 0 0 0 0 0 0 0 0 0
38918- 0 0 0 0 0 0 0 0 0 10 10 10
38919- 38 38 38 86 86 86 50 50 50 6 6 6
38920-128 128 128 174 154 114 156 107 11 168 122 10
38921-198 155 10 184 144 12 197 138 11 200 144 11
38922-206 145 10 206 145 10 197 138 11 188 164 115
38923-195 195 195 198 198 198 174 174 174 14 14 14
38924- 2 2 6 22 22 22 116 116 116 116 116 116
38925- 22 22 22 2 2 6 74 74 74 70 70 70
38926- 30 30 30 10 10 10 0 0 0 0 0 0
38927- 0 0 0 0 0 0 0 0 0 0 0 0
38928- 0 0 0 0 0 0 0 0 0 0 0 0
38929- 0 0 0 0 0 0 0 0 0 0 0 0
38930- 0 0 0 0 0 0 0 0 0 0 0 0
38931- 0 0 0 0 0 0 0 0 0 0 0 0
38932- 0 0 0 0 0 0 0 0 0 0 0 0
38933- 0 0 0 0 0 0 0 0 0 0 0 0
38934- 0 0 0 0 0 0 0 0 0 0 0 0
38935- 0 0 0 0 0 0 0 0 0 0 0 0
38936- 0 0 0 0 0 0 0 0 0 0 0 0
38937- 0 0 0 0 0 0 0 0 0 0 0 0
38938- 0 0 0 0 0 0 6 6 6 18 18 18
38939- 50 50 50 101 101 101 26 26 26 10 10 10
38940-138 138 138 190 190 190 174 154 114 156 107 11
38941-197 138 11 200 144 11 197 138 11 192 133 9
38942-180 123 7 190 142 34 190 178 144 187 187 187
38943-202 202 202 221 221 221 214 214 214 66 66 66
38944- 2 2 6 2 2 6 50 50 50 62 62 62
38945- 6 6 6 2 2 6 10 10 10 90 90 90
38946- 50 50 50 18 18 18 6 6 6 0 0 0
38947- 0 0 0 0 0 0 0 0 0 0 0 0
38948- 0 0 0 0 0 0 0 0 0 0 0 0
38949- 0 0 0 0 0 0 0 0 0 0 0 0
38950- 0 0 0 0 0 0 0 0 0 0 0 0
38951- 0 0 0 0 0 0 0 0 0 0 0 0
38952- 0 0 0 0 0 0 0 0 0 0 0 0
38953- 0 0 0 0 0 0 0 0 0 0 0 0
38954- 0 0 0 0 0 0 0 0 0 0 0 0
38955- 0 0 0 0 0 0 0 0 0 0 0 0
38956- 0 0 0 0 0 0 0 0 0 0 0 0
38957- 0 0 0 0 0 0 0 0 0 0 0 0
38958- 0 0 0 0 0 0 10 10 10 34 34 34
38959- 74 74 74 74 74 74 2 2 6 6 6 6
38960-144 144 144 198 198 198 190 190 190 178 166 146
38961-154 121 60 156 107 11 156 107 11 168 124 44
38962-174 154 114 187 187 187 190 190 190 210 210 210
38963-246 246 246 253 253 253 253 253 253 182 182 182
38964- 6 6 6 2 2 6 2 2 6 2 2 6
38965- 2 2 6 2 2 6 2 2 6 62 62 62
38966- 74 74 74 34 34 34 14 14 14 0 0 0
38967- 0 0 0 0 0 0 0 0 0 0 0 0
38968- 0 0 0 0 0 0 0 0 0 0 0 0
38969- 0 0 0 0 0 0 0 0 0 0 0 0
38970- 0 0 0 0 0 0 0 0 0 0 0 0
38971- 0 0 0 0 0 0 0 0 0 0 0 0
38972- 0 0 0 0 0 0 0 0 0 0 0 0
38973- 0 0 0 0 0 0 0 0 0 0 0 0
38974- 0 0 0 0 0 0 0 0 0 0 0 0
38975- 0 0 0 0 0 0 0 0 0 0 0 0
38976- 0 0 0 0 0 0 0 0 0 0 0 0
38977- 0 0 0 0 0 0 0 0 0 0 0 0
38978- 0 0 0 10 10 10 22 22 22 54 54 54
38979- 94 94 94 18 18 18 2 2 6 46 46 46
38980-234 234 234 221 221 221 190 190 190 190 190 190
38981-190 190 190 187 187 187 187 187 187 190 190 190
38982-190 190 190 195 195 195 214 214 214 242 242 242
38983-253 253 253 253 253 253 253 253 253 253 253 253
38984- 82 82 82 2 2 6 2 2 6 2 2 6
38985- 2 2 6 2 2 6 2 2 6 14 14 14
38986- 86 86 86 54 54 54 22 22 22 6 6 6
38987- 0 0 0 0 0 0 0 0 0 0 0 0
38988- 0 0 0 0 0 0 0 0 0 0 0 0
38989- 0 0 0 0 0 0 0 0 0 0 0 0
38990- 0 0 0 0 0 0 0 0 0 0 0 0
38991- 0 0 0 0 0 0 0 0 0 0 0 0
38992- 0 0 0 0 0 0 0 0 0 0 0 0
38993- 0 0 0 0 0 0 0 0 0 0 0 0
38994- 0 0 0 0 0 0 0 0 0 0 0 0
38995- 0 0 0 0 0 0 0 0 0 0 0 0
38996- 0 0 0 0 0 0 0 0 0 0 0 0
38997- 0 0 0 0 0 0 0 0 0 0 0 0
38998- 6 6 6 18 18 18 46 46 46 90 90 90
38999- 46 46 46 18 18 18 6 6 6 182 182 182
39000-253 253 253 246 246 246 206 206 206 190 190 190
39001-190 190 190 190 190 190 190 190 190 190 190 190
39002-206 206 206 231 231 231 250 250 250 253 253 253
39003-253 253 253 253 253 253 253 253 253 253 253 253
39004-202 202 202 14 14 14 2 2 6 2 2 6
39005- 2 2 6 2 2 6 2 2 6 2 2 6
39006- 42 42 42 86 86 86 42 42 42 18 18 18
39007- 6 6 6 0 0 0 0 0 0 0 0 0
39008- 0 0 0 0 0 0 0 0 0 0 0 0
39009- 0 0 0 0 0 0 0 0 0 0 0 0
39010- 0 0 0 0 0 0 0 0 0 0 0 0
39011- 0 0 0 0 0 0 0 0 0 0 0 0
39012- 0 0 0 0 0 0 0 0 0 0 0 0
39013- 0 0 0 0 0 0 0 0 0 0 0 0
39014- 0 0 0 0 0 0 0 0 0 0 0 0
39015- 0 0 0 0 0 0 0 0 0 0 0 0
39016- 0 0 0 0 0 0 0 0 0 0 0 0
39017- 0 0 0 0 0 0 0 0 0 6 6 6
39018- 14 14 14 38 38 38 74 74 74 66 66 66
39019- 2 2 6 6 6 6 90 90 90 250 250 250
39020-253 253 253 253 253 253 238 238 238 198 198 198
39021-190 190 190 190 190 190 195 195 195 221 221 221
39022-246 246 246 253 253 253 253 253 253 253 253 253
39023-253 253 253 253 253 253 253 253 253 253 253 253
39024-253 253 253 82 82 82 2 2 6 2 2 6
39025- 2 2 6 2 2 6 2 2 6 2 2 6
39026- 2 2 6 78 78 78 70 70 70 34 34 34
39027- 14 14 14 6 6 6 0 0 0 0 0 0
39028- 0 0 0 0 0 0 0 0 0 0 0 0
39029- 0 0 0 0 0 0 0 0 0 0 0 0
39030- 0 0 0 0 0 0 0 0 0 0 0 0
39031- 0 0 0 0 0 0 0 0 0 0 0 0
39032- 0 0 0 0 0 0 0 0 0 0 0 0
39033- 0 0 0 0 0 0 0 0 0 0 0 0
39034- 0 0 0 0 0 0 0 0 0 0 0 0
39035- 0 0 0 0 0 0 0 0 0 0 0 0
39036- 0 0 0 0 0 0 0 0 0 0 0 0
39037- 0 0 0 0 0 0 0 0 0 14 14 14
39038- 34 34 34 66 66 66 78 78 78 6 6 6
39039- 2 2 6 18 18 18 218 218 218 253 253 253
39040-253 253 253 253 253 253 253 253 253 246 246 246
39041-226 226 226 231 231 231 246 246 246 253 253 253
39042-253 253 253 253 253 253 253 253 253 253 253 253
39043-253 253 253 253 253 253 253 253 253 253 253 253
39044-253 253 253 178 178 178 2 2 6 2 2 6
39045- 2 2 6 2 2 6 2 2 6 2 2 6
39046- 2 2 6 18 18 18 90 90 90 62 62 62
39047- 30 30 30 10 10 10 0 0 0 0 0 0
39048- 0 0 0 0 0 0 0 0 0 0 0 0
39049- 0 0 0 0 0 0 0 0 0 0 0 0
39050- 0 0 0 0 0 0 0 0 0 0 0 0
39051- 0 0 0 0 0 0 0 0 0 0 0 0
39052- 0 0 0 0 0 0 0 0 0 0 0 0
39053- 0 0 0 0 0 0 0 0 0 0 0 0
39054- 0 0 0 0 0 0 0 0 0 0 0 0
39055- 0 0 0 0 0 0 0 0 0 0 0 0
39056- 0 0 0 0 0 0 0 0 0 0 0 0
39057- 0 0 0 0 0 0 10 10 10 26 26 26
39058- 58 58 58 90 90 90 18 18 18 2 2 6
39059- 2 2 6 110 110 110 253 253 253 253 253 253
39060-253 253 253 253 253 253 253 253 253 253 253 253
39061-250 250 250 253 253 253 253 253 253 253 253 253
39062-253 253 253 253 253 253 253 253 253 253 253 253
39063-253 253 253 253 253 253 253 253 253 253 253 253
39064-253 253 253 231 231 231 18 18 18 2 2 6
39065- 2 2 6 2 2 6 2 2 6 2 2 6
39066- 2 2 6 2 2 6 18 18 18 94 94 94
39067- 54 54 54 26 26 26 10 10 10 0 0 0
39068- 0 0 0 0 0 0 0 0 0 0 0 0
39069- 0 0 0 0 0 0 0 0 0 0 0 0
39070- 0 0 0 0 0 0 0 0 0 0 0 0
39071- 0 0 0 0 0 0 0 0 0 0 0 0
39072- 0 0 0 0 0 0 0 0 0 0 0 0
39073- 0 0 0 0 0 0 0 0 0 0 0 0
39074- 0 0 0 0 0 0 0 0 0 0 0 0
39075- 0 0 0 0 0 0 0 0 0 0 0 0
39076- 0 0 0 0 0 0 0 0 0 0 0 0
39077- 0 0 0 6 6 6 22 22 22 50 50 50
39078- 90 90 90 26 26 26 2 2 6 2 2 6
39079- 14 14 14 195 195 195 250 250 250 253 253 253
39080-253 253 253 253 253 253 253 253 253 253 253 253
39081-253 253 253 253 253 253 253 253 253 253 253 253
39082-253 253 253 253 253 253 253 253 253 253 253 253
39083-253 253 253 253 253 253 253 253 253 253 253 253
39084-250 250 250 242 242 242 54 54 54 2 2 6
39085- 2 2 6 2 2 6 2 2 6 2 2 6
39086- 2 2 6 2 2 6 2 2 6 38 38 38
39087- 86 86 86 50 50 50 22 22 22 6 6 6
39088- 0 0 0 0 0 0 0 0 0 0 0 0
39089- 0 0 0 0 0 0 0 0 0 0 0 0
39090- 0 0 0 0 0 0 0 0 0 0 0 0
39091- 0 0 0 0 0 0 0 0 0 0 0 0
39092- 0 0 0 0 0 0 0 0 0 0 0 0
39093- 0 0 0 0 0 0 0 0 0 0 0 0
39094- 0 0 0 0 0 0 0 0 0 0 0 0
39095- 0 0 0 0 0 0 0 0 0 0 0 0
39096- 0 0 0 0 0 0 0 0 0 0 0 0
39097- 6 6 6 14 14 14 38 38 38 82 82 82
39098- 34 34 34 2 2 6 2 2 6 2 2 6
39099- 42 42 42 195 195 195 246 246 246 253 253 253
39100-253 253 253 253 253 253 253 253 253 250 250 250
39101-242 242 242 242 242 242 250 250 250 253 253 253
39102-253 253 253 253 253 253 253 253 253 253 253 253
39103-253 253 253 250 250 250 246 246 246 238 238 238
39104-226 226 226 231 231 231 101 101 101 6 6 6
39105- 2 2 6 2 2 6 2 2 6 2 2 6
39106- 2 2 6 2 2 6 2 2 6 2 2 6
39107- 38 38 38 82 82 82 42 42 42 14 14 14
39108- 6 6 6 0 0 0 0 0 0 0 0 0
39109- 0 0 0 0 0 0 0 0 0 0 0 0
39110- 0 0 0 0 0 0 0 0 0 0 0 0
39111- 0 0 0 0 0 0 0 0 0 0 0 0
39112- 0 0 0 0 0 0 0 0 0 0 0 0
39113- 0 0 0 0 0 0 0 0 0 0 0 0
39114- 0 0 0 0 0 0 0 0 0 0 0 0
39115- 0 0 0 0 0 0 0 0 0 0 0 0
39116- 0 0 0 0 0 0 0 0 0 0 0 0
39117- 10 10 10 26 26 26 62 62 62 66 66 66
39118- 2 2 6 2 2 6 2 2 6 6 6 6
39119- 70 70 70 170 170 170 206 206 206 234 234 234
39120-246 246 246 250 250 250 250 250 250 238 238 238
39121-226 226 226 231 231 231 238 238 238 250 250 250
39122-250 250 250 250 250 250 246 246 246 231 231 231
39123-214 214 214 206 206 206 202 202 202 202 202 202
39124-198 198 198 202 202 202 182 182 182 18 18 18
39125- 2 2 6 2 2 6 2 2 6 2 2 6
39126- 2 2 6 2 2 6 2 2 6 2 2 6
39127- 2 2 6 62 62 62 66 66 66 30 30 30
39128- 10 10 10 0 0 0 0 0 0 0 0 0
39129- 0 0 0 0 0 0 0 0 0 0 0 0
39130- 0 0 0 0 0 0 0 0 0 0 0 0
39131- 0 0 0 0 0 0 0 0 0 0 0 0
39132- 0 0 0 0 0 0 0 0 0 0 0 0
39133- 0 0 0 0 0 0 0 0 0 0 0 0
39134- 0 0 0 0 0 0 0 0 0 0 0 0
39135- 0 0 0 0 0 0 0 0 0 0 0 0
39136- 0 0 0 0 0 0 0 0 0 0 0 0
39137- 14 14 14 42 42 42 82 82 82 18 18 18
39138- 2 2 6 2 2 6 2 2 6 10 10 10
39139- 94 94 94 182 182 182 218 218 218 242 242 242
39140-250 250 250 253 253 253 253 253 253 250 250 250
39141-234 234 234 253 253 253 253 253 253 253 253 253
39142-253 253 253 253 253 253 253 253 253 246 246 246
39143-238 238 238 226 226 226 210 210 210 202 202 202
39144-195 195 195 195 195 195 210 210 210 158 158 158
39145- 6 6 6 14 14 14 50 50 50 14 14 14
39146- 2 2 6 2 2 6 2 2 6 2 2 6
39147- 2 2 6 6 6 6 86 86 86 46 46 46
39148- 18 18 18 6 6 6 0 0 0 0 0 0
39149- 0 0 0 0 0 0 0 0 0 0 0 0
39150- 0 0 0 0 0 0 0 0 0 0 0 0
39151- 0 0 0 0 0 0 0 0 0 0 0 0
39152- 0 0 0 0 0 0 0 0 0 0 0 0
39153- 0 0 0 0 0 0 0 0 0 0 0 0
39154- 0 0 0 0 0 0 0 0 0 0 0 0
39155- 0 0 0 0 0 0 0 0 0 0 0 0
39156- 0 0 0 0 0 0 0 0 0 6 6 6
39157- 22 22 22 54 54 54 70 70 70 2 2 6
39158- 2 2 6 10 10 10 2 2 6 22 22 22
39159-166 166 166 231 231 231 250 250 250 253 253 253
39160-253 253 253 253 253 253 253 253 253 250 250 250
39161-242 242 242 253 253 253 253 253 253 253 253 253
39162-253 253 253 253 253 253 253 253 253 253 253 253
39163-253 253 253 253 253 253 253 253 253 246 246 246
39164-231 231 231 206 206 206 198 198 198 226 226 226
39165- 94 94 94 2 2 6 6 6 6 38 38 38
39166- 30 30 30 2 2 6 2 2 6 2 2 6
39167- 2 2 6 2 2 6 62 62 62 66 66 66
39168- 26 26 26 10 10 10 0 0 0 0 0 0
39169- 0 0 0 0 0 0 0 0 0 0 0 0
39170- 0 0 0 0 0 0 0 0 0 0 0 0
39171- 0 0 0 0 0 0 0 0 0 0 0 0
39172- 0 0 0 0 0 0 0 0 0 0 0 0
39173- 0 0 0 0 0 0 0 0 0 0 0 0
39174- 0 0 0 0 0 0 0 0 0 0 0 0
39175- 0 0 0 0 0 0 0 0 0 0 0 0
39176- 0 0 0 0 0 0 0 0 0 10 10 10
39177- 30 30 30 74 74 74 50 50 50 2 2 6
39178- 26 26 26 26 26 26 2 2 6 106 106 106
39179-238 238 238 253 253 253 253 253 253 253 253 253
39180-253 253 253 253 253 253 253 253 253 253 253 253
39181-253 253 253 253 253 253 253 253 253 253 253 253
39182-253 253 253 253 253 253 253 253 253 253 253 253
39183-253 253 253 253 253 253 253 253 253 253 253 253
39184-253 253 253 246 246 246 218 218 218 202 202 202
39185-210 210 210 14 14 14 2 2 6 2 2 6
39186- 30 30 30 22 22 22 2 2 6 2 2 6
39187- 2 2 6 2 2 6 18 18 18 86 86 86
39188- 42 42 42 14 14 14 0 0 0 0 0 0
39189- 0 0 0 0 0 0 0 0 0 0 0 0
39190- 0 0 0 0 0 0 0 0 0 0 0 0
39191- 0 0 0 0 0 0 0 0 0 0 0 0
39192- 0 0 0 0 0 0 0 0 0 0 0 0
39193- 0 0 0 0 0 0 0 0 0 0 0 0
39194- 0 0 0 0 0 0 0 0 0 0 0 0
39195- 0 0 0 0 0 0 0 0 0 0 0 0
39196- 0 0 0 0 0 0 0 0 0 14 14 14
39197- 42 42 42 90 90 90 22 22 22 2 2 6
39198- 42 42 42 2 2 6 18 18 18 218 218 218
39199-253 253 253 253 253 253 253 253 253 253 253 253
39200-253 253 253 253 253 253 253 253 253 253 253 253
39201-253 253 253 253 253 253 253 253 253 253 253 253
39202-253 253 253 253 253 253 253 253 253 253 253 253
39203-253 253 253 253 253 253 253 253 253 253 253 253
39204-253 253 253 253 253 253 250 250 250 221 221 221
39205-218 218 218 101 101 101 2 2 6 14 14 14
39206- 18 18 18 38 38 38 10 10 10 2 2 6
39207- 2 2 6 2 2 6 2 2 6 78 78 78
39208- 58 58 58 22 22 22 6 6 6 0 0 0
39209- 0 0 0 0 0 0 0 0 0 0 0 0
39210- 0 0 0 0 0 0 0 0 0 0 0 0
39211- 0 0 0 0 0 0 0 0 0 0 0 0
39212- 0 0 0 0 0 0 0 0 0 0 0 0
39213- 0 0 0 0 0 0 0 0 0 0 0 0
39214- 0 0 0 0 0 0 0 0 0 0 0 0
39215- 0 0 0 0 0 0 0 0 0 0 0 0
39216- 0 0 0 0 0 0 6 6 6 18 18 18
39217- 54 54 54 82 82 82 2 2 6 26 26 26
39218- 22 22 22 2 2 6 123 123 123 253 253 253
39219-253 253 253 253 253 253 253 253 253 253 253 253
39220-253 253 253 253 253 253 253 253 253 253 253 253
39221-253 253 253 253 253 253 253 253 253 253 253 253
39222-253 253 253 253 253 253 253 253 253 253 253 253
39223-253 253 253 253 253 253 253 253 253 253 253 253
39224-253 253 253 253 253 253 253 253 253 250 250 250
39225-238 238 238 198 198 198 6 6 6 38 38 38
39226- 58 58 58 26 26 26 38 38 38 2 2 6
39227- 2 2 6 2 2 6 2 2 6 46 46 46
39228- 78 78 78 30 30 30 10 10 10 0 0 0
39229- 0 0 0 0 0 0 0 0 0 0 0 0
39230- 0 0 0 0 0 0 0 0 0 0 0 0
39231- 0 0 0 0 0 0 0 0 0 0 0 0
39232- 0 0 0 0 0 0 0 0 0 0 0 0
39233- 0 0 0 0 0 0 0 0 0 0 0 0
39234- 0 0 0 0 0 0 0 0 0 0 0 0
39235- 0 0 0 0 0 0 0 0 0 0 0 0
39236- 0 0 0 0 0 0 10 10 10 30 30 30
39237- 74 74 74 58 58 58 2 2 6 42 42 42
39238- 2 2 6 22 22 22 231 231 231 253 253 253
39239-253 253 253 253 253 253 253 253 253 253 253 253
39240-253 253 253 253 253 253 253 253 253 250 250 250
39241-253 253 253 253 253 253 253 253 253 253 253 253
39242-253 253 253 253 253 253 253 253 253 253 253 253
39243-253 253 253 253 253 253 253 253 253 253 253 253
39244-253 253 253 253 253 253 253 253 253 253 253 253
39245-253 253 253 246 246 246 46 46 46 38 38 38
39246- 42 42 42 14 14 14 38 38 38 14 14 14
39247- 2 2 6 2 2 6 2 2 6 6 6 6
39248- 86 86 86 46 46 46 14 14 14 0 0 0
39249- 0 0 0 0 0 0 0 0 0 0 0 0
39250- 0 0 0 0 0 0 0 0 0 0 0 0
39251- 0 0 0 0 0 0 0 0 0 0 0 0
39252- 0 0 0 0 0 0 0 0 0 0 0 0
39253- 0 0 0 0 0 0 0 0 0 0 0 0
39254- 0 0 0 0 0 0 0 0 0 0 0 0
39255- 0 0 0 0 0 0 0 0 0 0 0 0
39256- 0 0 0 6 6 6 14 14 14 42 42 42
39257- 90 90 90 18 18 18 18 18 18 26 26 26
39258- 2 2 6 116 116 116 253 253 253 253 253 253
39259-253 253 253 253 253 253 253 253 253 253 253 253
39260-253 253 253 253 253 253 250 250 250 238 238 238
39261-253 253 253 253 253 253 253 253 253 253 253 253
39262-253 253 253 253 253 253 253 253 253 253 253 253
39263-253 253 253 253 253 253 253 253 253 253 253 253
39264-253 253 253 253 253 253 253 253 253 253 253 253
39265-253 253 253 253 253 253 94 94 94 6 6 6
39266- 2 2 6 2 2 6 10 10 10 34 34 34
39267- 2 2 6 2 2 6 2 2 6 2 2 6
39268- 74 74 74 58 58 58 22 22 22 6 6 6
39269- 0 0 0 0 0 0 0 0 0 0 0 0
39270- 0 0 0 0 0 0 0 0 0 0 0 0
39271- 0 0 0 0 0 0 0 0 0 0 0 0
39272- 0 0 0 0 0 0 0 0 0 0 0 0
39273- 0 0 0 0 0 0 0 0 0 0 0 0
39274- 0 0 0 0 0 0 0 0 0 0 0 0
39275- 0 0 0 0 0 0 0 0 0 0 0 0
39276- 0 0 0 10 10 10 26 26 26 66 66 66
39277- 82 82 82 2 2 6 38 38 38 6 6 6
39278- 14 14 14 210 210 210 253 253 253 253 253 253
39279-253 253 253 253 253 253 253 253 253 253 253 253
39280-253 253 253 253 253 253 246 246 246 242 242 242
39281-253 253 253 253 253 253 253 253 253 253 253 253
39282-253 253 253 253 253 253 253 253 253 253 253 253
39283-253 253 253 253 253 253 253 253 253 253 253 253
39284-253 253 253 253 253 253 253 253 253 253 253 253
39285-253 253 253 253 253 253 144 144 144 2 2 6
39286- 2 2 6 2 2 6 2 2 6 46 46 46
39287- 2 2 6 2 2 6 2 2 6 2 2 6
39288- 42 42 42 74 74 74 30 30 30 10 10 10
39289- 0 0 0 0 0 0 0 0 0 0 0 0
39290- 0 0 0 0 0 0 0 0 0 0 0 0
39291- 0 0 0 0 0 0 0 0 0 0 0 0
39292- 0 0 0 0 0 0 0 0 0 0 0 0
39293- 0 0 0 0 0 0 0 0 0 0 0 0
39294- 0 0 0 0 0 0 0 0 0 0 0 0
39295- 0 0 0 0 0 0 0 0 0 0 0 0
39296- 6 6 6 14 14 14 42 42 42 90 90 90
39297- 26 26 26 6 6 6 42 42 42 2 2 6
39298- 74 74 74 250 250 250 253 253 253 253 253 253
39299-253 253 253 253 253 253 253 253 253 253 253 253
39300-253 253 253 253 253 253 242 242 242 242 242 242
39301-253 253 253 253 253 253 253 253 253 253 253 253
39302-253 253 253 253 253 253 253 253 253 253 253 253
39303-253 253 253 253 253 253 253 253 253 253 253 253
39304-253 253 253 253 253 253 253 253 253 253 253 253
39305-253 253 253 253 253 253 182 182 182 2 2 6
39306- 2 2 6 2 2 6 2 2 6 46 46 46
39307- 2 2 6 2 2 6 2 2 6 2 2 6
39308- 10 10 10 86 86 86 38 38 38 10 10 10
39309- 0 0 0 0 0 0 0 0 0 0 0 0
39310- 0 0 0 0 0 0 0 0 0 0 0 0
39311- 0 0 0 0 0 0 0 0 0 0 0 0
39312- 0 0 0 0 0 0 0 0 0 0 0 0
39313- 0 0 0 0 0 0 0 0 0 0 0 0
39314- 0 0 0 0 0 0 0 0 0 0 0 0
39315- 0 0 0 0 0 0 0 0 0 0 0 0
39316- 10 10 10 26 26 26 66 66 66 82 82 82
39317- 2 2 6 22 22 22 18 18 18 2 2 6
39318-149 149 149 253 253 253 253 253 253 253 253 253
39319-253 253 253 253 253 253 253 253 253 253 253 253
39320-253 253 253 253 253 253 234 234 234 242 242 242
39321-253 253 253 253 253 253 253 253 253 253 253 253
39322-253 253 253 253 253 253 253 253 253 253 253 253
39323-253 253 253 253 253 253 253 253 253 253 253 253
39324-253 253 253 253 253 253 253 253 253 253 253 253
39325-253 253 253 253 253 253 206 206 206 2 2 6
39326- 2 2 6 2 2 6 2 2 6 38 38 38
39327- 2 2 6 2 2 6 2 2 6 2 2 6
39328- 6 6 6 86 86 86 46 46 46 14 14 14
39329- 0 0 0 0 0 0 0 0 0 0 0 0
39330- 0 0 0 0 0 0 0 0 0 0 0 0
39331- 0 0 0 0 0 0 0 0 0 0 0 0
39332- 0 0 0 0 0 0 0 0 0 0 0 0
39333- 0 0 0 0 0 0 0 0 0 0 0 0
39334- 0 0 0 0 0 0 0 0 0 0 0 0
39335- 0 0 0 0 0 0 0 0 0 6 6 6
39336- 18 18 18 46 46 46 86 86 86 18 18 18
39337- 2 2 6 34 34 34 10 10 10 6 6 6
39338-210 210 210 253 253 253 253 253 253 253 253 253
39339-253 253 253 253 253 253 253 253 253 253 253 253
39340-253 253 253 253 253 253 234 234 234 242 242 242
39341-253 253 253 253 253 253 253 253 253 253 253 253
39342-253 253 253 253 253 253 253 253 253 253 253 253
39343-253 253 253 253 253 253 253 253 253 253 253 253
39344-253 253 253 253 253 253 253 253 253 253 253 253
39345-253 253 253 253 253 253 221 221 221 6 6 6
39346- 2 2 6 2 2 6 6 6 6 30 30 30
39347- 2 2 6 2 2 6 2 2 6 2 2 6
39348- 2 2 6 82 82 82 54 54 54 18 18 18
39349- 6 6 6 0 0 0 0 0 0 0 0 0
39350- 0 0 0 0 0 0 0 0 0 0 0 0
39351- 0 0 0 0 0 0 0 0 0 0 0 0
39352- 0 0 0 0 0 0 0 0 0 0 0 0
39353- 0 0 0 0 0 0 0 0 0 0 0 0
39354- 0 0 0 0 0 0 0 0 0 0 0 0
39355- 0 0 0 0 0 0 0 0 0 10 10 10
39356- 26 26 26 66 66 66 62 62 62 2 2 6
39357- 2 2 6 38 38 38 10 10 10 26 26 26
39358-238 238 238 253 253 253 253 253 253 253 253 253
39359-253 253 253 253 253 253 253 253 253 253 253 253
39360-253 253 253 253 253 253 231 231 231 238 238 238
39361-253 253 253 253 253 253 253 253 253 253 253 253
39362-253 253 253 253 253 253 253 253 253 253 253 253
39363-253 253 253 253 253 253 253 253 253 253 253 253
39364-253 253 253 253 253 253 253 253 253 253 253 253
39365-253 253 253 253 253 253 231 231 231 6 6 6
39366- 2 2 6 2 2 6 10 10 10 30 30 30
39367- 2 2 6 2 2 6 2 2 6 2 2 6
39368- 2 2 6 66 66 66 58 58 58 22 22 22
39369- 6 6 6 0 0 0 0 0 0 0 0 0
39370- 0 0 0 0 0 0 0 0 0 0 0 0
39371- 0 0 0 0 0 0 0 0 0 0 0 0
39372- 0 0 0 0 0 0 0 0 0 0 0 0
39373- 0 0 0 0 0 0 0 0 0 0 0 0
39374- 0 0 0 0 0 0 0 0 0 0 0 0
39375- 0 0 0 0 0 0 0 0 0 10 10 10
39376- 38 38 38 78 78 78 6 6 6 2 2 6
39377- 2 2 6 46 46 46 14 14 14 42 42 42
39378-246 246 246 253 253 253 253 253 253 253 253 253
39379-253 253 253 253 253 253 253 253 253 253 253 253
39380-253 253 253 253 253 253 231 231 231 242 242 242
39381-253 253 253 253 253 253 253 253 253 253 253 253
39382-253 253 253 253 253 253 253 253 253 253 253 253
39383-253 253 253 253 253 253 253 253 253 253 253 253
39384-253 253 253 253 253 253 253 253 253 253 253 253
39385-253 253 253 253 253 253 234 234 234 10 10 10
39386- 2 2 6 2 2 6 22 22 22 14 14 14
39387- 2 2 6 2 2 6 2 2 6 2 2 6
39388- 2 2 6 66 66 66 62 62 62 22 22 22
39389- 6 6 6 0 0 0 0 0 0 0 0 0
39390- 0 0 0 0 0 0 0 0 0 0 0 0
39391- 0 0 0 0 0 0 0 0 0 0 0 0
39392- 0 0 0 0 0 0 0 0 0 0 0 0
39393- 0 0 0 0 0 0 0 0 0 0 0 0
39394- 0 0 0 0 0 0 0 0 0 0 0 0
39395- 0 0 0 0 0 0 6 6 6 18 18 18
39396- 50 50 50 74 74 74 2 2 6 2 2 6
39397- 14 14 14 70 70 70 34 34 34 62 62 62
39398-250 250 250 253 253 253 253 253 253 253 253 253
39399-253 253 253 253 253 253 253 253 253 253 253 253
39400-253 253 253 253 253 253 231 231 231 246 246 246
39401-253 253 253 253 253 253 253 253 253 253 253 253
39402-253 253 253 253 253 253 253 253 253 253 253 253
39403-253 253 253 253 253 253 253 253 253 253 253 253
39404-253 253 253 253 253 253 253 253 253 253 253 253
39405-253 253 253 253 253 253 234 234 234 14 14 14
39406- 2 2 6 2 2 6 30 30 30 2 2 6
39407- 2 2 6 2 2 6 2 2 6 2 2 6
39408- 2 2 6 66 66 66 62 62 62 22 22 22
39409- 6 6 6 0 0 0 0 0 0 0 0 0
39410- 0 0 0 0 0 0 0 0 0 0 0 0
39411- 0 0 0 0 0 0 0 0 0 0 0 0
39412- 0 0 0 0 0 0 0 0 0 0 0 0
39413- 0 0 0 0 0 0 0 0 0 0 0 0
39414- 0 0 0 0 0 0 0 0 0 0 0 0
39415- 0 0 0 0 0 0 6 6 6 18 18 18
39416- 54 54 54 62 62 62 2 2 6 2 2 6
39417- 2 2 6 30 30 30 46 46 46 70 70 70
39418-250 250 250 253 253 253 253 253 253 253 253 253
39419-253 253 253 253 253 253 253 253 253 253 253 253
39420-253 253 253 253 253 253 231 231 231 246 246 246
39421-253 253 253 253 253 253 253 253 253 253 253 253
39422-253 253 253 253 253 253 253 253 253 253 253 253
39423-253 253 253 253 253 253 253 253 253 253 253 253
39424-253 253 253 253 253 253 253 253 253 253 253 253
39425-253 253 253 253 253 253 226 226 226 10 10 10
39426- 2 2 6 6 6 6 30 30 30 2 2 6
39427- 2 2 6 2 2 6 2 2 6 2 2 6
39428- 2 2 6 66 66 66 58 58 58 22 22 22
39429- 6 6 6 0 0 0 0 0 0 0 0 0
39430- 0 0 0 0 0 0 0 0 0 0 0 0
39431- 0 0 0 0 0 0 0 0 0 0 0 0
39432- 0 0 0 0 0 0 0 0 0 0 0 0
39433- 0 0 0 0 0 0 0 0 0 0 0 0
39434- 0 0 0 0 0 0 0 0 0 0 0 0
39435- 0 0 0 0 0 0 6 6 6 22 22 22
39436- 58 58 58 62 62 62 2 2 6 2 2 6
39437- 2 2 6 2 2 6 30 30 30 78 78 78
39438-250 250 250 253 253 253 253 253 253 253 253 253
39439-253 253 253 253 253 253 253 253 253 253 253 253
39440-253 253 253 253 253 253 231 231 231 246 246 246
39441-253 253 253 253 253 253 253 253 253 253 253 253
39442-253 253 253 253 253 253 253 253 253 253 253 253
39443-253 253 253 253 253 253 253 253 253 253 253 253
39444-253 253 253 253 253 253 253 253 253 253 253 253
39445-253 253 253 253 253 253 206 206 206 2 2 6
39446- 22 22 22 34 34 34 18 14 6 22 22 22
39447- 26 26 26 18 18 18 6 6 6 2 2 6
39448- 2 2 6 82 82 82 54 54 54 18 18 18
39449- 6 6 6 0 0 0 0 0 0 0 0 0
39450- 0 0 0 0 0 0 0 0 0 0 0 0
39451- 0 0 0 0 0 0 0 0 0 0 0 0
39452- 0 0 0 0 0 0 0 0 0 0 0 0
39453- 0 0 0 0 0 0 0 0 0 0 0 0
39454- 0 0 0 0 0 0 0 0 0 0 0 0
39455- 0 0 0 0 0 0 6 6 6 26 26 26
39456- 62 62 62 106 106 106 74 54 14 185 133 11
39457-210 162 10 121 92 8 6 6 6 62 62 62
39458-238 238 238 253 253 253 253 253 253 253 253 253
39459-253 253 253 253 253 253 253 253 253 253 253 253
39460-253 253 253 253 253 253 231 231 231 246 246 246
39461-253 253 253 253 253 253 253 253 253 253 253 253
39462-253 253 253 253 253 253 253 253 253 253 253 253
39463-253 253 253 253 253 253 253 253 253 253 253 253
39464-253 253 253 253 253 253 253 253 253 253 253 253
39465-253 253 253 253 253 253 158 158 158 18 18 18
39466- 14 14 14 2 2 6 2 2 6 2 2 6
39467- 6 6 6 18 18 18 66 66 66 38 38 38
39468- 6 6 6 94 94 94 50 50 50 18 18 18
39469- 6 6 6 0 0 0 0 0 0 0 0 0
39470- 0 0 0 0 0 0 0 0 0 0 0 0
39471- 0 0 0 0 0 0 0 0 0 0 0 0
39472- 0 0 0 0 0 0 0 0 0 0 0 0
39473- 0 0 0 0 0 0 0 0 0 0 0 0
39474- 0 0 0 0 0 0 0 0 0 6 6 6
39475- 10 10 10 10 10 10 18 18 18 38 38 38
39476- 78 78 78 142 134 106 216 158 10 242 186 14
39477-246 190 14 246 190 14 156 118 10 10 10 10
39478- 90 90 90 238 238 238 253 253 253 253 253 253
39479-253 253 253 253 253 253 253 253 253 253 253 253
39480-253 253 253 253 253 253 231 231 231 250 250 250
39481-253 253 253 253 253 253 253 253 253 253 253 253
39482-253 253 253 253 253 253 253 253 253 253 253 253
39483-253 253 253 253 253 253 253 253 253 253 253 253
39484-253 253 253 253 253 253 253 253 253 246 230 190
39485-238 204 91 238 204 91 181 142 44 37 26 9
39486- 2 2 6 2 2 6 2 2 6 2 2 6
39487- 2 2 6 2 2 6 38 38 38 46 46 46
39488- 26 26 26 106 106 106 54 54 54 18 18 18
39489- 6 6 6 0 0 0 0 0 0 0 0 0
39490- 0 0 0 0 0 0 0 0 0 0 0 0
39491- 0 0 0 0 0 0 0 0 0 0 0 0
39492- 0 0 0 0 0 0 0 0 0 0 0 0
39493- 0 0 0 0 0 0 0 0 0 0 0 0
39494- 0 0 0 6 6 6 14 14 14 22 22 22
39495- 30 30 30 38 38 38 50 50 50 70 70 70
39496-106 106 106 190 142 34 226 170 11 242 186 14
39497-246 190 14 246 190 14 246 190 14 154 114 10
39498- 6 6 6 74 74 74 226 226 226 253 253 253
39499-253 253 253 253 253 253 253 253 253 253 253 253
39500-253 253 253 253 253 253 231 231 231 250 250 250
39501-253 253 253 253 253 253 253 253 253 253 253 253
39502-253 253 253 253 253 253 253 253 253 253 253 253
39503-253 253 253 253 253 253 253 253 253 253 253 253
39504-253 253 253 253 253 253 253 253 253 228 184 62
39505-241 196 14 241 208 19 232 195 16 38 30 10
39506- 2 2 6 2 2 6 2 2 6 2 2 6
39507- 2 2 6 6 6 6 30 30 30 26 26 26
39508-203 166 17 154 142 90 66 66 66 26 26 26
39509- 6 6 6 0 0 0 0 0 0 0 0 0
39510- 0 0 0 0 0 0 0 0 0 0 0 0
39511- 0 0 0 0 0 0 0 0 0 0 0 0
39512- 0 0 0 0 0 0 0 0 0 0 0 0
39513- 0 0 0 0 0 0 0 0 0 0 0 0
39514- 6 6 6 18 18 18 38 38 38 58 58 58
39515- 78 78 78 86 86 86 101 101 101 123 123 123
39516-175 146 61 210 150 10 234 174 13 246 186 14
39517-246 190 14 246 190 14 246 190 14 238 190 10
39518-102 78 10 2 2 6 46 46 46 198 198 198
39519-253 253 253 253 253 253 253 253 253 253 253 253
39520-253 253 253 253 253 253 234 234 234 242 242 242
39521-253 253 253 253 253 253 253 253 253 253 253 253
39522-253 253 253 253 253 253 253 253 253 253 253 253
39523-253 253 253 253 253 253 253 253 253 253 253 253
39524-253 253 253 253 253 253 253 253 253 224 178 62
39525-242 186 14 241 196 14 210 166 10 22 18 6
39526- 2 2 6 2 2 6 2 2 6 2 2 6
39527- 2 2 6 2 2 6 6 6 6 121 92 8
39528-238 202 15 232 195 16 82 82 82 34 34 34
39529- 10 10 10 0 0 0 0 0 0 0 0 0
39530- 0 0 0 0 0 0 0 0 0 0 0 0
39531- 0 0 0 0 0 0 0 0 0 0 0 0
39532- 0 0 0 0 0 0 0 0 0 0 0 0
39533- 0 0 0 0 0 0 0 0 0 0 0 0
39534- 14 14 14 38 38 38 70 70 70 154 122 46
39535-190 142 34 200 144 11 197 138 11 197 138 11
39536-213 154 11 226 170 11 242 186 14 246 190 14
39537-246 190 14 246 190 14 246 190 14 246 190 14
39538-225 175 15 46 32 6 2 2 6 22 22 22
39539-158 158 158 250 250 250 253 253 253 253 253 253
39540-253 253 253 253 253 253 253 253 253 253 253 253
39541-253 253 253 253 253 253 253 253 253 253 253 253
39542-253 253 253 253 253 253 253 253 253 253 253 253
39543-253 253 253 253 253 253 253 253 253 253 253 253
39544-253 253 253 250 250 250 242 242 242 224 178 62
39545-239 182 13 236 186 11 213 154 11 46 32 6
39546- 2 2 6 2 2 6 2 2 6 2 2 6
39547- 2 2 6 2 2 6 61 42 6 225 175 15
39548-238 190 10 236 186 11 112 100 78 42 42 42
39549- 14 14 14 0 0 0 0 0 0 0 0 0
39550- 0 0 0 0 0 0 0 0 0 0 0 0
39551- 0 0 0 0 0 0 0 0 0 0 0 0
39552- 0 0 0 0 0 0 0 0 0 0 0 0
39553- 0 0 0 0 0 0 0 0 0 6 6 6
39554- 22 22 22 54 54 54 154 122 46 213 154 11
39555-226 170 11 230 174 11 226 170 11 226 170 11
39556-236 178 12 242 186 14 246 190 14 246 190 14
39557-246 190 14 246 190 14 246 190 14 246 190 14
39558-241 196 14 184 144 12 10 10 10 2 2 6
39559- 6 6 6 116 116 116 242 242 242 253 253 253
39560-253 253 253 253 253 253 253 253 253 253 253 253
39561-253 253 253 253 253 253 253 253 253 253 253 253
39562-253 253 253 253 253 253 253 253 253 253 253 253
39563-253 253 253 253 253 253 253 253 253 253 253 253
39564-253 253 253 231 231 231 198 198 198 214 170 54
39565-236 178 12 236 178 12 210 150 10 137 92 6
39566- 18 14 6 2 2 6 2 2 6 2 2 6
39567- 6 6 6 70 47 6 200 144 11 236 178 12
39568-239 182 13 239 182 13 124 112 88 58 58 58
39569- 22 22 22 6 6 6 0 0 0 0 0 0
39570- 0 0 0 0 0 0 0 0 0 0 0 0
39571- 0 0 0 0 0 0 0 0 0 0 0 0
39572- 0 0 0 0 0 0 0 0 0 0 0 0
39573- 0 0 0 0 0 0 0 0 0 10 10 10
39574- 30 30 30 70 70 70 180 133 36 226 170 11
39575-239 182 13 242 186 14 242 186 14 246 186 14
39576-246 190 14 246 190 14 246 190 14 246 190 14
39577-246 190 14 246 190 14 246 190 14 246 190 14
39578-246 190 14 232 195 16 98 70 6 2 2 6
39579- 2 2 6 2 2 6 66 66 66 221 221 221
39580-253 253 253 253 253 253 253 253 253 253 253 253
39581-253 253 253 253 253 253 253 253 253 253 253 253
39582-253 253 253 253 253 253 253 253 253 253 253 253
39583-253 253 253 253 253 253 253 253 253 253 253 253
39584-253 253 253 206 206 206 198 198 198 214 166 58
39585-230 174 11 230 174 11 216 158 10 192 133 9
39586-163 110 8 116 81 8 102 78 10 116 81 8
39587-167 114 7 197 138 11 226 170 11 239 182 13
39588-242 186 14 242 186 14 162 146 94 78 78 78
39589- 34 34 34 14 14 14 6 6 6 0 0 0
39590- 0 0 0 0 0 0 0 0 0 0 0 0
39591- 0 0 0 0 0 0 0 0 0 0 0 0
39592- 0 0 0 0 0 0 0 0 0 0 0 0
39593- 0 0 0 0 0 0 0 0 0 6 6 6
39594- 30 30 30 78 78 78 190 142 34 226 170 11
39595-239 182 13 246 190 14 246 190 14 246 190 14
39596-246 190 14 246 190 14 246 190 14 246 190 14
39597-246 190 14 246 190 14 246 190 14 246 190 14
39598-246 190 14 241 196 14 203 166 17 22 18 6
39599- 2 2 6 2 2 6 2 2 6 38 38 38
39600-218 218 218 253 253 253 253 253 253 253 253 253
39601-253 253 253 253 253 253 253 253 253 253 253 253
39602-253 253 253 253 253 253 253 253 253 253 253 253
39603-253 253 253 253 253 253 253 253 253 253 253 253
39604-250 250 250 206 206 206 198 198 198 202 162 69
39605-226 170 11 236 178 12 224 166 10 210 150 10
39606-200 144 11 197 138 11 192 133 9 197 138 11
39607-210 150 10 226 170 11 242 186 14 246 190 14
39608-246 190 14 246 186 14 225 175 15 124 112 88
39609- 62 62 62 30 30 30 14 14 14 6 6 6
39610- 0 0 0 0 0 0 0 0 0 0 0 0
39611- 0 0 0 0 0 0 0 0 0 0 0 0
39612- 0 0 0 0 0 0 0 0 0 0 0 0
39613- 0 0 0 0 0 0 0 0 0 10 10 10
39614- 30 30 30 78 78 78 174 135 50 224 166 10
39615-239 182 13 246 190 14 246 190 14 246 190 14
39616-246 190 14 246 190 14 246 190 14 246 190 14
39617-246 190 14 246 190 14 246 190 14 246 190 14
39618-246 190 14 246 190 14 241 196 14 139 102 15
39619- 2 2 6 2 2 6 2 2 6 2 2 6
39620- 78 78 78 250 250 250 253 253 253 253 253 253
39621-253 253 253 253 253 253 253 253 253 253 253 253
39622-253 253 253 253 253 253 253 253 253 253 253 253
39623-253 253 253 253 253 253 253 253 253 253 253 253
39624-250 250 250 214 214 214 198 198 198 190 150 46
39625-219 162 10 236 178 12 234 174 13 224 166 10
39626-216 158 10 213 154 11 213 154 11 216 158 10
39627-226 170 11 239 182 13 246 190 14 246 190 14
39628-246 190 14 246 190 14 242 186 14 206 162 42
39629-101 101 101 58 58 58 30 30 30 14 14 14
39630- 6 6 6 0 0 0 0 0 0 0 0 0
39631- 0 0 0 0 0 0 0 0 0 0 0 0
39632- 0 0 0 0 0 0 0 0 0 0 0 0
39633- 0 0 0 0 0 0 0 0 0 10 10 10
39634- 30 30 30 74 74 74 174 135 50 216 158 10
39635-236 178 12 246 190 14 246 190 14 246 190 14
39636-246 190 14 246 190 14 246 190 14 246 190 14
39637-246 190 14 246 190 14 246 190 14 246 190 14
39638-246 190 14 246 190 14 241 196 14 226 184 13
39639- 61 42 6 2 2 6 2 2 6 2 2 6
39640- 22 22 22 238 238 238 253 253 253 253 253 253
39641-253 253 253 253 253 253 253 253 253 253 253 253
39642-253 253 253 253 253 253 253 253 253 253 253 253
39643-253 253 253 253 253 253 253 253 253 253 253 253
39644-253 253 253 226 226 226 187 187 187 180 133 36
39645-216 158 10 236 178 12 239 182 13 236 178 12
39646-230 174 11 226 170 11 226 170 11 230 174 11
39647-236 178 12 242 186 14 246 190 14 246 190 14
39648-246 190 14 246 190 14 246 186 14 239 182 13
39649-206 162 42 106 106 106 66 66 66 34 34 34
39650- 14 14 14 6 6 6 0 0 0 0 0 0
39651- 0 0 0 0 0 0 0 0 0 0 0 0
39652- 0 0 0 0 0 0 0 0 0 0 0 0
39653- 0 0 0 0 0 0 0 0 0 6 6 6
39654- 26 26 26 70 70 70 163 133 67 213 154 11
39655-236 178 12 246 190 14 246 190 14 246 190 14
39656-246 190 14 246 190 14 246 190 14 246 190 14
39657-246 190 14 246 190 14 246 190 14 246 190 14
39658-246 190 14 246 190 14 246 190 14 241 196 14
39659-190 146 13 18 14 6 2 2 6 2 2 6
39660- 46 46 46 246 246 246 253 253 253 253 253 253
39661-253 253 253 253 253 253 253 253 253 253 253 253
39662-253 253 253 253 253 253 253 253 253 253 253 253
39663-253 253 253 253 253 253 253 253 253 253 253 253
39664-253 253 253 221 221 221 86 86 86 156 107 11
39665-216 158 10 236 178 12 242 186 14 246 186 14
39666-242 186 14 239 182 13 239 182 13 242 186 14
39667-242 186 14 246 186 14 246 190 14 246 190 14
39668-246 190 14 246 190 14 246 190 14 246 190 14
39669-242 186 14 225 175 15 142 122 72 66 66 66
39670- 30 30 30 10 10 10 0 0 0 0 0 0
39671- 0 0 0 0 0 0 0 0 0 0 0 0
39672- 0 0 0 0 0 0 0 0 0 0 0 0
39673- 0 0 0 0 0 0 0 0 0 6 6 6
39674- 26 26 26 70 70 70 163 133 67 210 150 10
39675-236 178 12 246 190 14 246 190 14 246 190 14
39676-246 190 14 246 190 14 246 190 14 246 190 14
39677-246 190 14 246 190 14 246 190 14 246 190 14
39678-246 190 14 246 190 14 246 190 14 246 190 14
39679-232 195 16 121 92 8 34 34 34 106 106 106
39680-221 221 221 253 253 253 253 253 253 253 253 253
39681-253 253 253 253 253 253 253 253 253 253 253 253
39682-253 253 253 253 253 253 253 253 253 253 253 253
39683-253 253 253 253 253 253 253 253 253 253 253 253
39684-242 242 242 82 82 82 18 14 6 163 110 8
39685-216 158 10 236 178 12 242 186 14 246 190 14
39686-246 190 14 246 190 14 246 190 14 246 190 14
39687-246 190 14 246 190 14 246 190 14 246 190 14
39688-246 190 14 246 190 14 246 190 14 246 190 14
39689-246 190 14 246 190 14 242 186 14 163 133 67
39690- 46 46 46 18 18 18 6 6 6 0 0 0
39691- 0 0 0 0 0 0 0 0 0 0 0 0
39692- 0 0 0 0 0 0 0 0 0 0 0 0
39693- 0 0 0 0 0 0 0 0 0 10 10 10
39694- 30 30 30 78 78 78 163 133 67 210 150 10
39695-236 178 12 246 186 14 246 190 14 246 190 14
39696-246 190 14 246 190 14 246 190 14 246 190 14
39697-246 190 14 246 190 14 246 190 14 246 190 14
39698-246 190 14 246 190 14 246 190 14 246 190 14
39699-241 196 14 215 174 15 190 178 144 253 253 253
39700-253 253 253 253 253 253 253 253 253 253 253 253
39701-253 253 253 253 253 253 253 253 253 253 253 253
39702-253 253 253 253 253 253 253 253 253 253 253 253
39703-253 253 253 253 253 253 253 253 253 218 218 218
39704- 58 58 58 2 2 6 22 18 6 167 114 7
39705-216 158 10 236 178 12 246 186 14 246 190 14
39706-246 190 14 246 190 14 246 190 14 246 190 14
39707-246 190 14 246 190 14 246 190 14 246 190 14
39708-246 190 14 246 190 14 246 190 14 246 190 14
39709-246 190 14 246 186 14 242 186 14 190 150 46
39710- 54 54 54 22 22 22 6 6 6 0 0 0
39711- 0 0 0 0 0 0 0 0 0 0 0 0
39712- 0 0 0 0 0 0 0 0 0 0 0 0
39713- 0 0 0 0 0 0 0 0 0 14 14 14
39714- 38 38 38 86 86 86 180 133 36 213 154 11
39715-236 178 12 246 186 14 246 190 14 246 190 14
39716-246 190 14 246 190 14 246 190 14 246 190 14
39717-246 190 14 246 190 14 246 190 14 246 190 14
39718-246 190 14 246 190 14 246 190 14 246 190 14
39719-246 190 14 232 195 16 190 146 13 214 214 214
39720-253 253 253 253 253 253 253 253 253 253 253 253
39721-253 253 253 253 253 253 253 253 253 253 253 253
39722-253 253 253 253 253 253 253 253 253 253 253 253
39723-253 253 253 250 250 250 170 170 170 26 26 26
39724- 2 2 6 2 2 6 37 26 9 163 110 8
39725-219 162 10 239 182 13 246 186 14 246 190 14
39726-246 190 14 246 190 14 246 190 14 246 190 14
39727-246 190 14 246 190 14 246 190 14 246 190 14
39728-246 190 14 246 190 14 246 190 14 246 190 14
39729-246 186 14 236 178 12 224 166 10 142 122 72
39730- 46 46 46 18 18 18 6 6 6 0 0 0
39731- 0 0 0 0 0 0 0 0 0 0 0 0
39732- 0 0 0 0 0 0 0 0 0 0 0 0
39733- 0 0 0 0 0 0 6 6 6 18 18 18
39734- 50 50 50 109 106 95 192 133 9 224 166 10
39735-242 186 14 246 190 14 246 190 14 246 190 14
39736-246 190 14 246 190 14 246 190 14 246 190 14
39737-246 190 14 246 190 14 246 190 14 246 190 14
39738-246 190 14 246 190 14 246 190 14 246 190 14
39739-242 186 14 226 184 13 210 162 10 142 110 46
39740-226 226 226 253 253 253 253 253 253 253 253 253
39741-253 253 253 253 253 253 253 253 253 253 253 253
39742-253 253 253 253 253 253 253 253 253 253 253 253
39743-198 198 198 66 66 66 2 2 6 2 2 6
39744- 2 2 6 2 2 6 50 34 6 156 107 11
39745-219 162 10 239 182 13 246 186 14 246 190 14
39746-246 190 14 246 190 14 246 190 14 246 190 14
39747-246 190 14 246 190 14 246 190 14 246 190 14
39748-246 190 14 246 190 14 246 190 14 242 186 14
39749-234 174 13 213 154 11 154 122 46 66 66 66
39750- 30 30 30 10 10 10 0 0 0 0 0 0
39751- 0 0 0 0 0 0 0 0 0 0 0 0
39752- 0 0 0 0 0 0 0 0 0 0 0 0
39753- 0 0 0 0 0 0 6 6 6 22 22 22
39754- 58 58 58 154 121 60 206 145 10 234 174 13
39755-242 186 14 246 186 14 246 190 14 246 190 14
39756-246 190 14 246 190 14 246 190 14 246 190 14
39757-246 190 14 246 190 14 246 190 14 246 190 14
39758-246 190 14 246 190 14 246 190 14 246 190 14
39759-246 186 14 236 178 12 210 162 10 163 110 8
39760- 61 42 6 138 138 138 218 218 218 250 250 250
39761-253 253 253 253 253 253 253 253 253 250 250 250
39762-242 242 242 210 210 210 144 144 144 66 66 66
39763- 6 6 6 2 2 6 2 2 6 2 2 6
39764- 2 2 6 2 2 6 61 42 6 163 110 8
39765-216 158 10 236 178 12 246 190 14 246 190 14
39766-246 190 14 246 190 14 246 190 14 246 190 14
39767-246 190 14 246 190 14 246 190 14 246 190 14
39768-246 190 14 239 182 13 230 174 11 216 158 10
39769-190 142 34 124 112 88 70 70 70 38 38 38
39770- 18 18 18 6 6 6 0 0 0 0 0 0
39771- 0 0 0 0 0 0 0 0 0 0 0 0
39772- 0 0 0 0 0 0 0 0 0 0 0 0
39773- 0 0 0 0 0 0 6 6 6 22 22 22
39774- 62 62 62 168 124 44 206 145 10 224 166 10
39775-236 178 12 239 182 13 242 186 14 242 186 14
39776-246 186 14 246 190 14 246 190 14 246 190 14
39777-246 190 14 246 190 14 246 190 14 246 190 14
39778-246 190 14 246 190 14 246 190 14 246 190 14
39779-246 190 14 236 178 12 216 158 10 175 118 6
39780- 80 54 7 2 2 6 6 6 6 30 30 30
39781- 54 54 54 62 62 62 50 50 50 38 38 38
39782- 14 14 14 2 2 6 2 2 6 2 2 6
39783- 2 2 6 2 2 6 2 2 6 2 2 6
39784- 2 2 6 6 6 6 80 54 7 167 114 7
39785-213 154 11 236 178 12 246 190 14 246 190 14
39786-246 190 14 246 190 14 246 190 14 246 190 14
39787-246 190 14 242 186 14 239 182 13 239 182 13
39788-230 174 11 210 150 10 174 135 50 124 112 88
39789- 82 82 82 54 54 54 34 34 34 18 18 18
39790- 6 6 6 0 0 0 0 0 0 0 0 0
39791- 0 0 0 0 0 0 0 0 0 0 0 0
39792- 0 0 0 0 0 0 0 0 0 0 0 0
39793- 0 0 0 0 0 0 6 6 6 18 18 18
39794- 50 50 50 158 118 36 192 133 9 200 144 11
39795-216 158 10 219 162 10 224 166 10 226 170 11
39796-230 174 11 236 178 12 239 182 13 239 182 13
39797-242 186 14 246 186 14 246 190 14 246 190 14
39798-246 190 14 246 190 14 246 190 14 246 190 14
39799-246 186 14 230 174 11 210 150 10 163 110 8
39800-104 69 6 10 10 10 2 2 6 2 2 6
39801- 2 2 6 2 2 6 2 2 6 2 2 6
39802- 2 2 6 2 2 6 2 2 6 2 2 6
39803- 2 2 6 2 2 6 2 2 6 2 2 6
39804- 2 2 6 6 6 6 91 60 6 167 114 7
39805-206 145 10 230 174 11 242 186 14 246 190 14
39806-246 190 14 246 190 14 246 186 14 242 186 14
39807-239 182 13 230 174 11 224 166 10 213 154 11
39808-180 133 36 124 112 88 86 86 86 58 58 58
39809- 38 38 38 22 22 22 10 10 10 6 6 6
39810- 0 0 0 0 0 0 0 0 0 0 0 0
39811- 0 0 0 0 0 0 0 0 0 0 0 0
39812- 0 0 0 0 0 0 0 0 0 0 0 0
39813- 0 0 0 0 0 0 0 0 0 14 14 14
39814- 34 34 34 70 70 70 138 110 50 158 118 36
39815-167 114 7 180 123 7 192 133 9 197 138 11
39816-200 144 11 206 145 10 213 154 11 219 162 10
39817-224 166 10 230 174 11 239 182 13 242 186 14
39818-246 186 14 246 186 14 246 186 14 246 186 14
39819-239 182 13 216 158 10 185 133 11 152 99 6
39820-104 69 6 18 14 6 2 2 6 2 2 6
39821- 2 2 6 2 2 6 2 2 6 2 2 6
39822- 2 2 6 2 2 6 2 2 6 2 2 6
39823- 2 2 6 2 2 6 2 2 6 2 2 6
39824- 2 2 6 6 6 6 80 54 7 152 99 6
39825-192 133 9 219 162 10 236 178 12 239 182 13
39826-246 186 14 242 186 14 239 182 13 236 178 12
39827-224 166 10 206 145 10 192 133 9 154 121 60
39828- 94 94 94 62 62 62 42 42 42 22 22 22
39829- 14 14 14 6 6 6 0 0 0 0 0 0
39830- 0 0 0 0 0 0 0 0 0 0 0 0
39831- 0 0 0 0 0 0 0 0 0 0 0 0
39832- 0 0 0 0 0 0 0 0 0 0 0 0
39833- 0 0 0 0 0 0 0 0 0 6 6 6
39834- 18 18 18 34 34 34 58 58 58 78 78 78
39835-101 98 89 124 112 88 142 110 46 156 107 11
39836-163 110 8 167 114 7 175 118 6 180 123 7
39837-185 133 11 197 138 11 210 150 10 219 162 10
39838-226 170 11 236 178 12 236 178 12 234 174 13
39839-219 162 10 197 138 11 163 110 8 130 83 6
39840- 91 60 6 10 10 10 2 2 6 2 2 6
39841- 18 18 18 38 38 38 38 38 38 38 38 38
39842- 38 38 38 38 38 38 38 38 38 38 38 38
39843- 38 38 38 38 38 38 26 26 26 2 2 6
39844- 2 2 6 6 6 6 70 47 6 137 92 6
39845-175 118 6 200 144 11 219 162 10 230 174 11
39846-234 174 13 230 174 11 219 162 10 210 150 10
39847-192 133 9 163 110 8 124 112 88 82 82 82
39848- 50 50 50 30 30 30 14 14 14 6 6 6
39849- 0 0 0 0 0 0 0 0 0 0 0 0
39850- 0 0 0 0 0 0 0 0 0 0 0 0
39851- 0 0 0 0 0 0 0 0 0 0 0 0
39852- 0 0 0 0 0 0 0 0 0 0 0 0
39853- 0 0 0 0 0 0 0 0 0 0 0 0
39854- 6 6 6 14 14 14 22 22 22 34 34 34
39855- 42 42 42 58 58 58 74 74 74 86 86 86
39856-101 98 89 122 102 70 130 98 46 121 87 25
39857-137 92 6 152 99 6 163 110 8 180 123 7
39858-185 133 11 197 138 11 206 145 10 200 144 11
39859-180 123 7 156 107 11 130 83 6 104 69 6
39860- 50 34 6 54 54 54 110 110 110 101 98 89
39861- 86 86 86 82 82 82 78 78 78 78 78 78
39862- 78 78 78 78 78 78 78 78 78 78 78 78
39863- 78 78 78 82 82 82 86 86 86 94 94 94
39864-106 106 106 101 101 101 86 66 34 124 80 6
39865-156 107 11 180 123 7 192 133 9 200 144 11
39866-206 145 10 200 144 11 192 133 9 175 118 6
39867-139 102 15 109 106 95 70 70 70 42 42 42
39868- 22 22 22 10 10 10 0 0 0 0 0 0
39869- 0 0 0 0 0 0 0 0 0 0 0 0
39870- 0 0 0 0 0 0 0 0 0 0 0 0
39871- 0 0 0 0 0 0 0 0 0 0 0 0
39872- 0 0 0 0 0 0 0 0 0 0 0 0
39873- 0 0 0 0 0 0 0 0 0 0 0 0
39874- 0 0 0 0 0 0 6 6 6 10 10 10
39875- 14 14 14 22 22 22 30 30 30 38 38 38
39876- 50 50 50 62 62 62 74 74 74 90 90 90
39877-101 98 89 112 100 78 121 87 25 124 80 6
39878-137 92 6 152 99 6 152 99 6 152 99 6
39879-138 86 6 124 80 6 98 70 6 86 66 30
39880-101 98 89 82 82 82 58 58 58 46 46 46
39881- 38 38 38 34 34 34 34 34 34 34 34 34
39882- 34 34 34 34 34 34 34 34 34 34 34 34
39883- 34 34 34 34 34 34 38 38 38 42 42 42
39884- 54 54 54 82 82 82 94 86 76 91 60 6
39885-134 86 6 156 107 11 167 114 7 175 118 6
39886-175 118 6 167 114 7 152 99 6 121 87 25
39887-101 98 89 62 62 62 34 34 34 18 18 18
39888- 6 6 6 0 0 0 0 0 0 0 0 0
39889- 0 0 0 0 0 0 0 0 0 0 0 0
39890- 0 0 0 0 0 0 0 0 0 0 0 0
39891- 0 0 0 0 0 0 0 0 0 0 0 0
39892- 0 0 0 0 0 0 0 0 0 0 0 0
39893- 0 0 0 0 0 0 0 0 0 0 0 0
39894- 0 0 0 0 0 0 0 0 0 0 0 0
39895- 0 0 0 6 6 6 6 6 6 10 10 10
39896- 18 18 18 22 22 22 30 30 30 42 42 42
39897- 50 50 50 66 66 66 86 86 86 101 98 89
39898-106 86 58 98 70 6 104 69 6 104 69 6
39899-104 69 6 91 60 6 82 62 34 90 90 90
39900- 62 62 62 38 38 38 22 22 22 14 14 14
39901- 10 10 10 10 10 10 10 10 10 10 10 10
39902- 10 10 10 10 10 10 6 6 6 10 10 10
39903- 10 10 10 10 10 10 10 10 10 14 14 14
39904- 22 22 22 42 42 42 70 70 70 89 81 66
39905- 80 54 7 104 69 6 124 80 6 137 92 6
39906-134 86 6 116 81 8 100 82 52 86 86 86
39907- 58 58 58 30 30 30 14 14 14 6 6 6
39908- 0 0 0 0 0 0 0 0 0 0 0 0
39909- 0 0 0 0 0 0 0 0 0 0 0 0
39910- 0 0 0 0 0 0 0 0 0 0 0 0
39911- 0 0 0 0 0 0 0 0 0 0 0 0
39912- 0 0 0 0 0 0 0 0 0 0 0 0
39913- 0 0 0 0 0 0 0 0 0 0 0 0
39914- 0 0 0 0 0 0 0 0 0 0 0 0
39915- 0 0 0 0 0 0 0 0 0 0 0 0
39916- 0 0 0 6 6 6 10 10 10 14 14 14
39917- 18 18 18 26 26 26 38 38 38 54 54 54
39918- 70 70 70 86 86 86 94 86 76 89 81 66
39919- 89 81 66 86 86 86 74 74 74 50 50 50
39920- 30 30 30 14 14 14 6 6 6 0 0 0
39921- 0 0 0 0 0 0 0 0 0 0 0 0
39922- 0 0 0 0 0 0 0 0 0 0 0 0
39923- 0 0 0 0 0 0 0 0 0 0 0 0
39924- 6 6 6 18 18 18 34 34 34 58 58 58
39925- 82 82 82 89 81 66 89 81 66 89 81 66
39926- 94 86 66 94 86 76 74 74 74 50 50 50
39927- 26 26 26 14 14 14 6 6 6 0 0 0
39928- 0 0 0 0 0 0 0 0 0 0 0 0
39929- 0 0 0 0 0 0 0 0 0 0 0 0
39930- 0 0 0 0 0 0 0 0 0 0 0 0
39931- 0 0 0 0 0 0 0 0 0 0 0 0
39932- 0 0 0 0 0 0 0 0 0 0 0 0
39933- 0 0 0 0 0 0 0 0 0 0 0 0
39934- 0 0 0 0 0 0 0 0 0 0 0 0
39935- 0 0 0 0 0 0 0 0 0 0 0 0
39936- 0 0 0 0 0 0 0 0 0 0 0 0
39937- 6 6 6 6 6 6 14 14 14 18 18 18
39938- 30 30 30 38 38 38 46 46 46 54 54 54
39939- 50 50 50 42 42 42 30 30 30 18 18 18
39940- 10 10 10 0 0 0 0 0 0 0 0 0
39941- 0 0 0 0 0 0 0 0 0 0 0 0
39942- 0 0 0 0 0 0 0 0 0 0 0 0
39943- 0 0 0 0 0 0 0 0 0 0 0 0
39944- 0 0 0 6 6 6 14 14 14 26 26 26
39945- 38 38 38 50 50 50 58 58 58 58 58 58
39946- 54 54 54 42 42 42 30 30 30 18 18 18
39947- 10 10 10 0 0 0 0 0 0 0 0 0
39948- 0 0 0 0 0 0 0 0 0 0 0 0
39949- 0 0 0 0 0 0 0 0 0 0 0 0
39950- 0 0 0 0 0 0 0 0 0 0 0 0
39951- 0 0 0 0 0 0 0 0 0 0 0 0
39952- 0 0 0 0 0 0 0 0 0 0 0 0
39953- 0 0 0 0 0 0 0 0 0 0 0 0
39954- 0 0 0 0 0 0 0 0 0 0 0 0
39955- 0 0 0 0 0 0 0 0 0 0 0 0
39956- 0 0 0 0 0 0 0 0 0 0 0 0
39957- 0 0 0 0 0 0 0 0 0 6 6 6
39958- 6 6 6 10 10 10 14 14 14 18 18 18
39959- 18 18 18 14 14 14 10 10 10 6 6 6
39960- 0 0 0 0 0 0 0 0 0 0 0 0
39961- 0 0 0 0 0 0 0 0 0 0 0 0
39962- 0 0 0 0 0 0 0 0 0 0 0 0
39963- 0 0 0 0 0 0 0 0 0 0 0 0
39964- 0 0 0 0 0 0 0 0 0 6 6 6
39965- 14 14 14 18 18 18 22 22 22 22 22 22
39966- 18 18 18 14 14 14 10 10 10 6 6 6
39967- 0 0 0 0 0 0 0 0 0 0 0 0
39968- 0 0 0 0 0 0 0 0 0 0 0 0
39969- 0 0 0 0 0 0 0 0 0 0 0 0
39970- 0 0 0 0 0 0 0 0 0 0 0 0
39971- 0 0 0 0 0 0 0 0 0 0 0 0
39972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39974+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39975+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39976+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39977+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39985+4 4 4 4 4 4
39986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39988+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39989+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39990+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39991+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39992+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39995+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39997+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39999+4 4 4 4 4 4
40000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40002+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40003+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40013+4 4 4 4 4 4
40014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40027+4 4 4 4 4 4
40028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40041+4 4 4 4 4 4
40042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40049+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40051+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40055+4 4 4 4 4 4
40056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40060+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
40061+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
40062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40065+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
40066+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40067+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
40068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40069+4 4 4 4 4 4
40070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40074+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
40075+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
40076+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40079+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
40080+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
40081+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
40082+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40083+4 4 4 4 4 4
40084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40088+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
40089+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
40090+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40091+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40092+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40093+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
40094+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
40095+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
40096+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
40097+4 4 4 4 4 4
40098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40101+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
40102+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
40103+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
40104+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
40105+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40106+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40107+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
40108+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
40109+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
40110+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
40111+4 4 4 4 4 4
40112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40115+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
40116+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
40117+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
40118+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
40119+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40120+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
40121+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
40122+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
40123+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
40124+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
40125+4 4 4 4 4 4
40126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40129+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
40130+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
40131+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
40132+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
40133+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40134+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
40135+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
40136+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
40137+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
40138+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
40139+4 4 4 4 4 4
40140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40142+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
40143+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
40144+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
40145+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
40146+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
40147+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
40148+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
40149+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
40150+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
40151+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
40152+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
40153+4 4 4 4 4 4
40154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40156+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
40157+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
40158+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
40159+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
40160+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
40161+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
40162+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
40163+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
40164+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
40165+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
40166+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
40167+4 4 4 4 4 4
40168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40170+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
40171+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
40172+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
40173+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
40174+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
40175+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
40176+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
40177+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
40178+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
40179+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
40180+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40181+4 4 4 4 4 4
40182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40184+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
40185+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
40186+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
40187+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
40188+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
40189+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
40190+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
40191+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
40192+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
40193+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
40194+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
40195+4 4 4 4 4 4
40196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40197+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
40198+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
40199+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
40200+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
40201+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
40202+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
40203+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
40204+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
40205+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
40206+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
40207+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
40208+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
40209+4 4 4 4 4 4
40210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40211+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
40212+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
40213+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
40214+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40215+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
40216+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
40217+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
40218+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
40219+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
40220+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
40221+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
40222+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
40223+0 0 0 4 4 4
40224+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40225+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
40226+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
40227+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
40228+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
40229+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
40230+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
40231+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
40232+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
40233+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
40234+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
40235+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
40236+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
40237+2 0 0 0 0 0
40238+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
40239+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
40240+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
40241+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
40242+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
40243+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
40244+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
40245+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
40246+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
40247+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
40248+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
40249+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
40250+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
40251+37 38 37 0 0 0
40252+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40253+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
40254+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
40255+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
40256+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
40257+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
40258+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
40259+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
40260+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
40261+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
40262+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
40263+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
40264+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
40265+85 115 134 4 0 0
40266+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
40267+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
40268+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
40269+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
40270+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
40271+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
40272+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
40273+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
40274+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
40275+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
40276+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
40277+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
40278+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
40279+60 73 81 4 0 0
40280+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
40281+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
40282+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
40283+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
40284+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
40285+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
40286+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
40287+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
40288+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
40289+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
40290+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
40291+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
40292+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
40293+16 19 21 4 0 0
40294+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
40295+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
40296+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
40297+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
40298+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
40299+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
40300+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
40301+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
40302+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
40303+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
40304+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
40305+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
40306+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
40307+4 0 0 4 3 3
40308+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
40309+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
40310+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
40311+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
40312+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
40313+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
40314+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
40315+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
40316+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
40317+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
40318+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
40319+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
40320+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
40321+3 2 2 4 4 4
40322+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
40323+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
40324+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
40325+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40326+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
40327+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
40328+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
40329+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
40330+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
40331+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
40332+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
40333+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
40334+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
40335+4 4 4 4 4 4
40336+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
40337+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
40338+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
40339+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
40340+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
40341+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
40342+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
40343+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
40344+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
40345+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
40346+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
40347+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
40348+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
40349+4 4 4 4 4 4
40350+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
40351+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
40352+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
40353+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
40354+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
40355+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40356+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
40357+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
40358+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
40359+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
40360+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
40361+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
40362+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
40363+5 5 5 5 5 5
40364+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
40365+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
40366+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
40367+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
40368+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
40369+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40370+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
40371+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
40372+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
40373+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
40374+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
40375+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
40376+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40377+5 5 5 4 4 4
40378+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
40379+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
40380+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
40381+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
40382+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40383+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
40384+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
40385+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
40386+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
40387+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
40388+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
40389+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40390+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40391+4 4 4 4 4 4
40392+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
40393+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
40394+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
40395+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
40396+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
40397+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40398+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40399+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
40400+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
40401+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
40402+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
40403+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
40404+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40405+4 4 4 4 4 4
40406+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
40407+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
40408+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
40409+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
40410+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40411+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
40412+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
40413+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
40414+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
40415+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
40416+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
40417+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40418+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40419+4 4 4 4 4 4
40420+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
40421+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
40422+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
40423+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
40424+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40425+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40426+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40427+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
40428+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
40429+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
40430+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
40431+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40433+4 4 4 4 4 4
40434+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
40435+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
40436+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
40437+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
40438+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40439+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
40440+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40441+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
40442+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
40443+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
40444+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40445+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40447+4 4 4 4 4 4
40448+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
40449+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
40450+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
40451+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
40452+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40453+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
40454+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
40455+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
40456+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
40457+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
40458+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
40459+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40461+4 4 4 4 4 4
40462+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
40463+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
40464+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
40465+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
40466+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40467+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
40468+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
40469+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
40470+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
40471+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
40472+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
40473+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40475+4 4 4 4 4 4
40476+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
40477+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
40478+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
40479+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40480+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
40481+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
40482+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
40483+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
40484+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
40485+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
40486+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40487+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40489+4 4 4 4 4 4
40490+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
40491+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
40492+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
40493+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40494+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40495+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
40496+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
40497+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
40498+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
40499+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
40500+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40501+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40503+4 4 4 4 4 4
40504+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
40505+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
40506+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40507+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40508+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40509+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
40510+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
40511+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
40512+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
40513+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
40514+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40515+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40516+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40517+4 4 4 4 4 4
40518+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
40519+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
40520+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40521+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40522+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40523+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
40524+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
40525+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
40526+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40527+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40528+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40529+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40530+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40531+4 4 4 4 4 4
40532+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40533+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
40534+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40535+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
40536+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
40537+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
40538+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
40539+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
40540+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40541+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40542+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40543+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40544+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40545+4 4 4 4 4 4
40546+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40547+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
40548+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40549+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
40550+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40551+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
40552+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
40553+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
40554+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40555+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40556+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40557+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40558+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40559+4 4 4 4 4 4
40560+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
40561+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
40562+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40563+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
40564+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
40565+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
40566+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
40567+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
40568+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40569+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40570+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40571+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40573+4 4 4 4 4 4
40574+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
40575+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
40576+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40577+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
40578+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
40579+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
40580+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
40581+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
40582+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40583+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40584+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40585+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40587+4 4 4 4 4 4
40588+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40589+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
40590+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40591+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
40592+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
40593+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
40594+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
40595+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
40596+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40597+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40598+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40599+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40601+4 4 4 4 4 4
40602+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
40603+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
40604+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40605+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
40606+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
40607+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
40608+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
40609+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
40610+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
40611+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40612+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40613+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40615+4 4 4 4 4 4
40616+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40617+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
40618+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
40619+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
40620+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
40621+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
40622+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
40623+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
40624+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40625+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40626+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40627+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40628+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40629+4 4 4 4 4 4
40630+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40631+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
40632+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40633+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
40634+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
40635+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
40636+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
40637+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
40638+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40639+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40640+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40641+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40643+4 4 4 4 4 4
40644+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40645+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
40646+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
40647+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
40648+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
40649+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
40650+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40651+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
40652+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40653+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40654+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40655+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40657+4 4 4 4 4 4
40658+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40659+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
40660+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
40661+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40662+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
40663+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
40664+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40665+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
40666+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40667+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40668+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40671+4 4 4 4 4 4
40672+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40673+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
40674+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
40675+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
40676+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
40677+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
40678+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
40679+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
40680+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
40681+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40682+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40685+4 4 4 4 4 4
40686+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40687+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
40688+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
40689+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
40690+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
40691+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
40692+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
40693+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
40694+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
40695+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40696+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40697+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40698+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40699+4 4 4 4 4 4
40700+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
40701+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
40702+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
40703+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
40704+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40705+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
40706+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
40707+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
40708+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
40709+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40710+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40711+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40713+4 4 4 4 4 4
40714+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40715+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
40716+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
40717+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
40718+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
40719+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
40720+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
40721+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
40722+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
40723+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40724+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40727+4 4 4 4 4 4
40728+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
40729+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
40730+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
40731+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
40732+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
40733+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
40734+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
40735+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
40736+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
40737+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
40738+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40741+4 4 4 4 4 4
40742+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
40743+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40744+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
40745+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
40746+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
40747+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
40748+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
40749+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
40750+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
40751+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
40752+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40755+4 4 4 4 4 4
40756+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
40757+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40758+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
40759+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
40760+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
40761+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
40762+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40763+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
40764+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
40765+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
40766+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40769+4 4 4 4 4 4
40770+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
40771+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
40772+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
40773+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
40774+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
40775+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
40776+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
40777+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
40778+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
40779+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
40780+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40783+4 4 4 4 4 4
40784+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
40785+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
40786+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
40787+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
40788+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
40789+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
40790+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
40791+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
40792+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
40793+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
40794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40797+4 4 4 4 4 4
40798+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40799+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
40800+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
40801+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
40802+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
40803+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
40804+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
40805+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
40806+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
40807+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
40808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40811+4 4 4 4 4 4
40812+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
40813+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
40814+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
40815+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
40816+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
40817+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
40818+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
40819+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
40820+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
40821+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40825+4 4 4 4 4 4
40826+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
40827+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
40828+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
40829+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
40830+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
40831+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
40832+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
40833+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
40834+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40835+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40839+4 4 4 4 4 4
40840+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
40841+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40842+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
40843+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
40844+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
40845+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
40846+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
40847+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
40848+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
40849+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40852+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40853+4 4 4 4 4 4
40854+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
40855+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
40856+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
40857+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
40858+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
40859+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
40860+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
40861+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
40862+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
40863+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40866+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40867+4 4 4 4 4 4
40868+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
40869+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
40870+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
40871+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
40872+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
40873+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
40874+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
40875+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
40876+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40877+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40879+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40880+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40881+4 4 4 4 4 4
40882+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
40883+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
40884+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
40885+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
40886+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
40887+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
40888+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
40889+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
40890+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40891+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40893+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40894+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40895+4 4 4 4 4 4
40896+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
40897+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
40898+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
40899+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
40900+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
40901+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
40902+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
40903+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
40904+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40905+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40908+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40909+4 4 4 4 4 4
40910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40911+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
40912+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40913+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
40914+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
40915+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
40916+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
40917+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
40918+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40919+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40920+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40921+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40922+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40923+4 4 4 4 4 4
40924+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40925+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
40926+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
40927+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
40928+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
40929+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
40930+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
40931+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
40932+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40933+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40934+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40936+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40937+4 4 4 4 4 4
40938+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40939+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40940+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
40941+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
40942+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
40943+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
40944+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
40945+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40946+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40947+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40948+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40949+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40950+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40951+4 4 4 4 4 4
40952+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40953+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40954+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
40955+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
40956+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
40957+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
40958+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
40959+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40960+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40961+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40963+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40964+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40965+4 4 4 4 4 4
40966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40967+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40969+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40970+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
40971+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
40972+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
40973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40974+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40975+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40976+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40977+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40979+4 4 4 4 4 4
40980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40983+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
40984+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
40985+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
40986+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
40987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40988+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40989+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40990+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40991+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40992+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40993+4 4 4 4 4 4
40994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40995+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40997+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
40998+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40999+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41000+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
41001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41002+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41003+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41007+4 4 4 4 4 4
41008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41011+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
41012+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
41013+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
41014+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
41015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41021+4 4 4 4 4 4
41022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41026+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
41027+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41028+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41035+4 4 4 4 4 4
41036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41040+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
41041+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
41042+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
41043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41049+4 4 4 4 4 4
41050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41051+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41054+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
41055+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
41056+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41063+4 4 4 4 4 4
41064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41065+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41068+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
41069+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
41070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41077+4 4 4 4 4 4
41078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41079+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41082+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41083+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
41084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41088+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41090+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41091+4 4 4 4 4 4
41092diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
41093index a159b63..4ab532d 100644
41094--- a/drivers/video/udlfb.c
41095+++ b/drivers/video/udlfb.c
41096@@ -620,11 +620,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
41097 dlfb_urb_completion(urb);
41098
41099 error:
41100- atomic_add(bytes_sent, &dev->bytes_sent);
41101- atomic_add(bytes_identical, &dev->bytes_identical);
41102- atomic_add(width*height*2, &dev->bytes_rendered);
41103+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41104+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41105+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
41106 end_cycles = get_cycles();
41107- atomic_add(((unsigned int) ((end_cycles - start_cycles)
41108+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41109 >> 10)), /* Kcycles */
41110 &dev->cpu_kcycles_used);
41111
41112@@ -745,11 +745,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
41113 dlfb_urb_completion(urb);
41114
41115 error:
41116- atomic_add(bytes_sent, &dev->bytes_sent);
41117- atomic_add(bytes_identical, &dev->bytes_identical);
41118- atomic_add(bytes_rendered, &dev->bytes_rendered);
41119+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41120+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41121+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
41122 end_cycles = get_cycles();
41123- atomic_add(((unsigned int) ((end_cycles - start_cycles)
41124+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41125 >> 10)), /* Kcycles */
41126 &dev->cpu_kcycles_used);
41127 }
41128@@ -1373,7 +1373,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
41129 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41130 struct dlfb_data *dev = fb_info->par;
41131 return snprintf(buf, PAGE_SIZE, "%u\n",
41132- atomic_read(&dev->bytes_rendered));
41133+ atomic_read_unchecked(&dev->bytes_rendered));
41134 }
41135
41136 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41137@@ -1381,7 +1381,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41138 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41139 struct dlfb_data *dev = fb_info->par;
41140 return snprintf(buf, PAGE_SIZE, "%u\n",
41141- atomic_read(&dev->bytes_identical));
41142+ atomic_read_unchecked(&dev->bytes_identical));
41143 }
41144
41145 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41146@@ -1389,7 +1389,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41147 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41148 struct dlfb_data *dev = fb_info->par;
41149 return snprintf(buf, PAGE_SIZE, "%u\n",
41150- atomic_read(&dev->bytes_sent));
41151+ atomic_read_unchecked(&dev->bytes_sent));
41152 }
41153
41154 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41155@@ -1397,7 +1397,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41156 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41157 struct dlfb_data *dev = fb_info->par;
41158 return snprintf(buf, PAGE_SIZE, "%u\n",
41159- atomic_read(&dev->cpu_kcycles_used));
41160+ atomic_read_unchecked(&dev->cpu_kcycles_used));
41161 }
41162
41163 static ssize_t edid_show(
41164@@ -1457,10 +1457,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
41165 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41166 struct dlfb_data *dev = fb_info->par;
41167
41168- atomic_set(&dev->bytes_rendered, 0);
41169- atomic_set(&dev->bytes_identical, 0);
41170- atomic_set(&dev->bytes_sent, 0);
41171- atomic_set(&dev->cpu_kcycles_used, 0);
41172+ atomic_set_unchecked(&dev->bytes_rendered, 0);
41173+ atomic_set_unchecked(&dev->bytes_identical, 0);
41174+ atomic_set_unchecked(&dev->bytes_sent, 0);
41175+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
41176
41177 return count;
41178 }
41179diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
41180index b0e2a42..e2df3ad 100644
41181--- a/drivers/video/uvesafb.c
41182+++ b/drivers/video/uvesafb.c
41183@@ -19,6 +19,7 @@
41184 #include <linux/io.h>
41185 #include <linux/mutex.h>
41186 #include <linux/slab.h>
41187+#include <linux/moduleloader.h>
41188 #include <video/edid.h>
41189 #include <video/uvesafb.h>
41190 #ifdef CONFIG_X86
41191@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
41192 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
41193 par->pmi_setpal = par->ypan = 0;
41194 } else {
41195+
41196+#ifdef CONFIG_PAX_KERNEXEC
41197+#ifdef CONFIG_MODULES
41198+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
41199+#endif
41200+ if (!par->pmi_code) {
41201+ par->pmi_setpal = par->ypan = 0;
41202+ return 0;
41203+ }
41204+#endif
41205+
41206 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
41207 + task->t.regs.edi);
41208+
41209+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41210+ pax_open_kernel();
41211+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
41212+ pax_close_kernel();
41213+
41214+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
41215+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
41216+#else
41217 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
41218 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
41219+#endif
41220+
41221 printk(KERN_INFO "uvesafb: protected mode interface info at "
41222 "%04x:%04x\n",
41223 (u16)task->t.regs.es, (u16)task->t.regs.edi);
41224@@ -816,13 +839,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
41225 par->ypan = ypan;
41226
41227 if (par->pmi_setpal || par->ypan) {
41228+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
41229 if (__supported_pte_mask & _PAGE_NX) {
41230 par->pmi_setpal = par->ypan = 0;
41231 printk(KERN_WARNING "uvesafb: NX protection is actively."
41232 "We have better not to use the PMI.\n");
41233- } else {
41234+ } else
41235+#endif
41236 uvesafb_vbe_getpmi(task, par);
41237- }
41238 }
41239 #else
41240 /* The protected mode interface is not available on non-x86. */
41241@@ -1836,6 +1860,11 @@ out:
41242 if (par->vbe_modes)
41243 kfree(par->vbe_modes);
41244
41245+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41246+ if (par->pmi_code)
41247+ module_free_exec(NULL, par->pmi_code);
41248+#endif
41249+
41250 framebuffer_release(info);
41251 return err;
41252 }
41253@@ -1862,6 +1891,12 @@ static int uvesafb_remove(struct platform_device *dev)
41254 kfree(par->vbe_state_orig);
41255 if (par->vbe_state_saved)
41256 kfree(par->vbe_state_saved);
41257+
41258+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41259+ if (par->pmi_code)
41260+ module_free_exec(NULL, par->pmi_code);
41261+#endif
41262+
41263 }
41264
41265 framebuffer_release(info);
41266diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
41267index 501b340..86bd4cf 100644
41268--- a/drivers/video/vesafb.c
41269+++ b/drivers/video/vesafb.c
41270@@ -9,6 +9,7 @@
41271 */
41272
41273 #include <linux/module.h>
41274+#include <linux/moduleloader.h>
41275 #include <linux/kernel.h>
41276 #include <linux/errno.h>
41277 #include <linux/string.h>
41278@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
41279 static int vram_total __initdata; /* Set total amount of memory */
41280 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
41281 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
41282-static void (*pmi_start)(void) __read_mostly;
41283-static void (*pmi_pal) (void) __read_mostly;
41284+static void (*pmi_start)(void) __read_only;
41285+static void (*pmi_pal) (void) __read_only;
41286 static int depth __read_mostly;
41287 static int vga_compat __read_mostly;
41288 /* --------------------------------------------------------------------- */
41289@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
41290 unsigned int size_vmode;
41291 unsigned int size_remap;
41292 unsigned int size_total;
41293+ void *pmi_code = NULL;
41294
41295 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
41296 return -ENODEV;
41297@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
41298 size_remap = size_total;
41299 vesafb_fix.smem_len = size_remap;
41300
41301-#ifndef __i386__
41302- screen_info.vesapm_seg = 0;
41303-#endif
41304-
41305 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
41306 printk(KERN_WARNING
41307 "vesafb: cannot reserve video memory at 0x%lx\n",
41308@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
41309 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
41310 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
41311
41312+#ifdef __i386__
41313+
41314+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41315+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
41316+ if (!pmi_code)
41317+#elif !defined(CONFIG_PAX_KERNEXEC)
41318+ if (0)
41319+#endif
41320+
41321+#endif
41322+ screen_info.vesapm_seg = 0;
41323+
41324 if (screen_info.vesapm_seg) {
41325- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
41326- screen_info.vesapm_seg,screen_info.vesapm_off);
41327+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
41328+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
41329 }
41330
41331 if (screen_info.vesapm_seg < 0xc000)
41332@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
41333
41334 if (ypan || pmi_setpal) {
41335 unsigned short *pmi_base;
41336+
41337 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
41338- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
41339- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
41340+
41341+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41342+ pax_open_kernel();
41343+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
41344+#else
41345+ pmi_code = pmi_base;
41346+#endif
41347+
41348+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
41349+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
41350+
41351+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41352+ pmi_start = ktva_ktla(pmi_start);
41353+ pmi_pal = ktva_ktla(pmi_pal);
41354+ pax_close_kernel();
41355+#endif
41356+
41357 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
41358 if (pmi_base[3]) {
41359 printk(KERN_INFO "vesafb: pmi: ports = ");
41360@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
41361 info->node, info->fix.id);
41362 return 0;
41363 err:
41364+
41365+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41366+ module_free_exec(NULL, pmi_code);
41367+#endif
41368+
41369 if (info->screen_base)
41370 iounmap(info->screen_base);
41371 framebuffer_release(info);
41372diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
41373index 88714ae..16c2e11 100644
41374--- a/drivers/video/via/via_clock.h
41375+++ b/drivers/video/via/via_clock.h
41376@@ -56,7 +56,7 @@ struct via_clock {
41377
41378 void (*set_engine_pll_state)(u8 state);
41379 void (*set_engine_pll)(struct via_pll_config config);
41380-};
41381+} __no_const;
41382
41383
41384 static inline u32 get_pll_internal_frequency(u32 ref_freq,
41385diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
41386index e56c934..fc22f4b 100644
41387--- a/drivers/xen/xen-pciback/conf_space.h
41388+++ b/drivers/xen/xen-pciback/conf_space.h
41389@@ -44,15 +44,15 @@ struct config_field {
41390 struct {
41391 conf_dword_write write;
41392 conf_dword_read read;
41393- } dw;
41394+ } __no_const dw;
41395 struct {
41396 conf_word_write write;
41397 conf_word_read read;
41398- } w;
41399+ } __no_const w;
41400 struct {
41401 conf_byte_write write;
41402 conf_byte_read read;
41403- } b;
41404+ } __no_const b;
41405 } u;
41406 struct list_head list;
41407 };
41408diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
41409index 014c8dd..6f3dfe6 100644
41410--- a/fs/9p/vfs_inode.c
41411+++ b/fs/9p/vfs_inode.c
41412@@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41413 void
41414 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41415 {
41416- char *s = nd_get_link(nd);
41417+ const char *s = nd_get_link(nd);
41418
41419 p9_debug(P9_DEBUG_VFS, " %s %s\n",
41420 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
41421diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
41422index e95d1b6..3454244 100644
41423--- a/fs/Kconfig.binfmt
41424+++ b/fs/Kconfig.binfmt
41425@@ -89,7 +89,7 @@ config HAVE_AOUT
41426
41427 config BINFMT_AOUT
41428 tristate "Kernel support for a.out and ECOFF binaries"
41429- depends on HAVE_AOUT
41430+ depends on HAVE_AOUT && BROKEN
41431 ---help---
41432 A.out (Assembler.OUTput) is a set of formats for libraries and
41433 executables used in the earliest versions of UNIX. Linux used
41434diff --git a/fs/aio.c b/fs/aio.c
41435index e7f2fad..15ad8a4 100644
41436--- a/fs/aio.c
41437+++ b/fs/aio.c
41438@@ -118,7 +118,7 @@ static int aio_setup_ring(struct kioctx *ctx)
41439 size += sizeof(struct io_event) * nr_events;
41440 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
41441
41442- if (nr_pages < 0)
41443+ if (nr_pages <= 0)
41444 return -EINVAL;
41445
41446 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
41447@@ -1440,18 +1440,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
41448 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41449 {
41450 ssize_t ret;
41451+ struct iovec iovstack;
41452
41453 #ifdef CONFIG_COMPAT
41454 if (compat)
41455 ret = compat_rw_copy_check_uvector(type,
41456 (struct compat_iovec __user *)kiocb->ki_buf,
41457- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41458+ kiocb->ki_nbytes, 1, &iovstack,
41459 &kiocb->ki_iovec, 1);
41460 else
41461 #endif
41462 ret = rw_copy_check_uvector(type,
41463 (struct iovec __user *)kiocb->ki_buf,
41464- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41465+ kiocb->ki_nbytes, 1, &iovstack,
41466 &kiocb->ki_iovec, 1);
41467 if (ret < 0)
41468 goto out;
41469@@ -1460,6 +1461,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41470 if (ret < 0)
41471 goto out;
41472
41473+ if (kiocb->ki_iovec == &iovstack) {
41474+ kiocb->ki_inline_vec = iovstack;
41475+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
41476+ }
41477 kiocb->ki_nr_segs = kiocb->ki_nbytes;
41478 kiocb->ki_cur_seg = 0;
41479 /* ki_nbytes/left now reflect bytes instead of segs */
41480diff --git a/fs/attr.c b/fs/attr.c
41481index d94d1b6..f9bccd6 100644
41482--- a/fs/attr.c
41483+++ b/fs/attr.c
41484@@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
41485 unsigned long limit;
41486
41487 limit = rlimit(RLIMIT_FSIZE);
41488+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
41489 if (limit != RLIM_INFINITY && offset > limit)
41490 goto out_sig;
41491 if (offset > inode->i_sb->s_maxbytes)
41492diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
41493index da8876d..9f3e6d8 100644
41494--- a/fs/autofs4/waitq.c
41495+++ b/fs/autofs4/waitq.c
41496@@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
41497 {
41498 unsigned long sigpipe, flags;
41499 mm_segment_t fs;
41500- const char *data = (const char *)addr;
41501+ const char __user *data = (const char __force_user *)addr;
41502 ssize_t wr = 0;
41503
41504 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
41505diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
41506index e18da23..affc30e 100644
41507--- a/fs/befs/linuxvfs.c
41508+++ b/fs/befs/linuxvfs.c
41509@@ -502,7 +502,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41510 {
41511 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
41512 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
41513- char *link = nd_get_link(nd);
41514+ const char *link = nd_get_link(nd);
41515 if (!IS_ERR(link))
41516 kfree(link);
41517 }
41518diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
41519index d146e18..12d1bd1 100644
41520--- a/fs/binfmt_aout.c
41521+++ b/fs/binfmt_aout.c
41522@@ -16,6 +16,7 @@
41523 #include <linux/string.h>
41524 #include <linux/fs.h>
41525 #include <linux/file.h>
41526+#include <linux/security.h>
41527 #include <linux/stat.h>
41528 #include <linux/fcntl.h>
41529 #include <linux/ptrace.h>
41530@@ -83,6 +84,8 @@ static int aout_core_dump(struct coredump_params *cprm)
41531 #endif
41532 # define START_STACK(u) ((void __user *)u.start_stack)
41533
41534+ memset(&dump, 0, sizeof(dump));
41535+
41536 fs = get_fs();
41537 set_fs(KERNEL_DS);
41538 has_dumped = 1;
41539@@ -94,10 +97,12 @@ static int aout_core_dump(struct coredump_params *cprm)
41540
41541 /* If the size of the dump file exceeds the rlimit, then see what would happen
41542 if we wrote the stack, but not the data area. */
41543+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
41544 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
41545 dump.u_dsize = 0;
41546
41547 /* Make sure we have enough room to write the stack and data areas. */
41548+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
41549 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
41550 dump.u_ssize = 0;
41551
41552@@ -231,6 +236,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41553 rlim = rlimit(RLIMIT_DATA);
41554 if (rlim >= RLIM_INFINITY)
41555 rlim = ~0;
41556+
41557+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
41558 if (ex.a_data + ex.a_bss > rlim)
41559 return -ENOMEM;
41560
41561@@ -265,6 +272,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41562
41563 install_exec_creds(bprm);
41564
41565+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41566+ current->mm->pax_flags = 0UL;
41567+#endif
41568+
41569+#ifdef CONFIG_PAX_PAGEEXEC
41570+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
41571+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
41572+
41573+#ifdef CONFIG_PAX_EMUTRAMP
41574+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
41575+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
41576+#endif
41577+
41578+#ifdef CONFIG_PAX_MPROTECT
41579+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
41580+ current->mm->pax_flags |= MF_PAX_MPROTECT;
41581+#endif
41582+
41583+ }
41584+#endif
41585+
41586 if (N_MAGIC(ex) == OMAGIC) {
41587 unsigned long text_addr, map_size;
41588 loff_t pos;
41589@@ -330,7 +358,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41590 }
41591
41592 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
41593- PROT_READ | PROT_WRITE | PROT_EXEC,
41594+ PROT_READ | PROT_WRITE,
41595 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
41596 fd_offset + ex.a_text);
41597 if (error != N_DATADDR(ex)) {
41598diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
41599index 16f7354..185d8dc 100644
41600--- a/fs/binfmt_elf.c
41601+++ b/fs/binfmt_elf.c
41602@@ -32,6 +32,7 @@
41603 #include <linux/elf.h>
41604 #include <linux/utsname.h>
41605 #include <linux/coredump.h>
41606+#include <linux/xattr.h>
41607 #include <asm/uaccess.h>
41608 #include <asm/param.h>
41609 #include <asm/page.h>
41610@@ -52,6 +53,10 @@ static int elf_core_dump(struct coredump_params *cprm);
41611 #define elf_core_dump NULL
41612 #endif
41613
41614+#ifdef CONFIG_PAX_MPROTECT
41615+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
41616+#endif
41617+
41618 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
41619 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
41620 #else
41621@@ -71,6 +76,11 @@ static struct linux_binfmt elf_format = {
41622 .load_binary = load_elf_binary,
41623 .load_shlib = load_elf_library,
41624 .core_dump = elf_core_dump,
41625+
41626+#ifdef CONFIG_PAX_MPROTECT
41627+ .handle_mprotect= elf_handle_mprotect,
41628+#endif
41629+
41630 .min_coredump = ELF_EXEC_PAGESIZE,
41631 };
41632
41633@@ -78,6 +88,8 @@ static struct linux_binfmt elf_format = {
41634
41635 static int set_brk(unsigned long start, unsigned long end)
41636 {
41637+ unsigned long e = end;
41638+
41639 start = ELF_PAGEALIGN(start);
41640 end = ELF_PAGEALIGN(end);
41641 if (end > start) {
41642@@ -86,7 +98,7 @@ static int set_brk(unsigned long start, unsigned long end)
41643 if (BAD_ADDR(addr))
41644 return addr;
41645 }
41646- current->mm->start_brk = current->mm->brk = end;
41647+ current->mm->start_brk = current->mm->brk = e;
41648 return 0;
41649 }
41650
41651@@ -147,12 +159,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41652 elf_addr_t __user *u_rand_bytes;
41653 const char *k_platform = ELF_PLATFORM;
41654 const char *k_base_platform = ELF_BASE_PLATFORM;
41655- unsigned char k_rand_bytes[16];
41656+ u32 k_rand_bytes[4];
41657 int items;
41658 elf_addr_t *elf_info;
41659 int ei_index = 0;
41660 const struct cred *cred = current_cred();
41661 struct vm_area_struct *vma;
41662+ unsigned long saved_auxv[AT_VECTOR_SIZE];
41663
41664 /*
41665 * In some cases (e.g. Hyper-Threading), we want to avoid L1
41666@@ -194,8 +207,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41667 * Generate 16 random bytes for userspace PRNG seeding.
41668 */
41669 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
41670- u_rand_bytes = (elf_addr_t __user *)
41671- STACK_ALLOC(p, sizeof(k_rand_bytes));
41672+ srandom32(k_rand_bytes[0] ^ random32());
41673+ srandom32(k_rand_bytes[1] ^ random32());
41674+ srandom32(k_rand_bytes[2] ^ random32());
41675+ srandom32(k_rand_bytes[3] ^ random32());
41676+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
41677+ u_rand_bytes = (elf_addr_t __user *) p;
41678 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
41679 return -EFAULT;
41680
41681@@ -307,9 +324,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41682 return -EFAULT;
41683 current->mm->env_end = p;
41684
41685+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
41686+
41687 /* Put the elf_info on the stack in the right place. */
41688 sp = (elf_addr_t __user *)envp + 1;
41689- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
41690+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
41691 return -EFAULT;
41692 return 0;
41693 }
41694@@ -380,10 +399,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41695 {
41696 struct elf_phdr *elf_phdata;
41697 struct elf_phdr *eppnt;
41698- unsigned long load_addr = 0;
41699+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
41700 int load_addr_set = 0;
41701 unsigned long last_bss = 0, elf_bss = 0;
41702- unsigned long error = ~0UL;
41703+ unsigned long error = -EINVAL;
41704 unsigned long total_size;
41705 int retval, i, size;
41706
41707@@ -429,6 +448,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41708 goto out_close;
41709 }
41710
41711+#ifdef CONFIG_PAX_SEGMEXEC
41712+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
41713+ pax_task_size = SEGMEXEC_TASK_SIZE;
41714+#endif
41715+
41716 eppnt = elf_phdata;
41717 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
41718 if (eppnt->p_type == PT_LOAD) {
41719@@ -472,8 +496,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41720 k = load_addr + eppnt->p_vaddr;
41721 if (BAD_ADDR(k) ||
41722 eppnt->p_filesz > eppnt->p_memsz ||
41723- eppnt->p_memsz > TASK_SIZE ||
41724- TASK_SIZE - eppnt->p_memsz < k) {
41725+ eppnt->p_memsz > pax_task_size ||
41726+ pax_task_size - eppnt->p_memsz < k) {
41727 error = -ENOMEM;
41728 goto out_close;
41729 }
41730@@ -525,6 +549,351 @@ out:
41731 return error;
41732 }
41733
41734+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
41735+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
41736+{
41737+ unsigned long pax_flags = 0UL;
41738+
41739+#ifdef CONFIG_PAX_PT_PAX_FLAGS
41740+
41741+#ifdef CONFIG_PAX_PAGEEXEC
41742+ if (elf_phdata->p_flags & PF_PAGEEXEC)
41743+ pax_flags |= MF_PAX_PAGEEXEC;
41744+#endif
41745+
41746+#ifdef CONFIG_PAX_SEGMEXEC
41747+ if (elf_phdata->p_flags & PF_SEGMEXEC)
41748+ pax_flags |= MF_PAX_SEGMEXEC;
41749+#endif
41750+
41751+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41752+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41753+ if ((__supported_pte_mask & _PAGE_NX))
41754+ pax_flags &= ~MF_PAX_SEGMEXEC;
41755+ else
41756+ pax_flags &= ~MF_PAX_PAGEEXEC;
41757+ }
41758+#endif
41759+
41760+#ifdef CONFIG_PAX_EMUTRAMP
41761+ if (elf_phdata->p_flags & PF_EMUTRAMP)
41762+ pax_flags |= MF_PAX_EMUTRAMP;
41763+#endif
41764+
41765+#ifdef CONFIG_PAX_MPROTECT
41766+ if (elf_phdata->p_flags & PF_MPROTECT)
41767+ pax_flags |= MF_PAX_MPROTECT;
41768+#endif
41769+
41770+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41771+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
41772+ pax_flags |= MF_PAX_RANDMMAP;
41773+#endif
41774+
41775+#endif
41776+
41777+ return pax_flags;
41778+}
41779+
41780+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
41781+{
41782+ unsigned long pax_flags = 0UL;
41783+
41784+#ifdef CONFIG_PAX_PT_PAX_FLAGS
41785+
41786+#ifdef CONFIG_PAX_PAGEEXEC
41787+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
41788+ pax_flags |= MF_PAX_PAGEEXEC;
41789+#endif
41790+
41791+#ifdef CONFIG_PAX_SEGMEXEC
41792+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
41793+ pax_flags |= MF_PAX_SEGMEXEC;
41794+#endif
41795+
41796+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41797+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41798+ if ((__supported_pte_mask & _PAGE_NX))
41799+ pax_flags &= ~MF_PAX_SEGMEXEC;
41800+ else
41801+ pax_flags &= ~MF_PAX_PAGEEXEC;
41802+ }
41803+#endif
41804+
41805+#ifdef CONFIG_PAX_EMUTRAMP
41806+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
41807+ pax_flags |= MF_PAX_EMUTRAMP;
41808+#endif
41809+
41810+#ifdef CONFIG_PAX_MPROTECT
41811+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
41812+ pax_flags |= MF_PAX_MPROTECT;
41813+#endif
41814+
41815+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41816+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
41817+ pax_flags |= MF_PAX_RANDMMAP;
41818+#endif
41819+
41820+#endif
41821+
41822+ return pax_flags;
41823+}
41824+
41825+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
41826+{
41827+ unsigned long pax_flags = 0UL;
41828+
41829+#ifdef CONFIG_PAX_EI_PAX
41830+
41831+#ifdef CONFIG_PAX_PAGEEXEC
41832+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
41833+ pax_flags |= MF_PAX_PAGEEXEC;
41834+#endif
41835+
41836+#ifdef CONFIG_PAX_SEGMEXEC
41837+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
41838+ pax_flags |= MF_PAX_SEGMEXEC;
41839+#endif
41840+
41841+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41842+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41843+ if ((__supported_pte_mask & _PAGE_NX))
41844+ pax_flags &= ~MF_PAX_SEGMEXEC;
41845+ else
41846+ pax_flags &= ~MF_PAX_PAGEEXEC;
41847+ }
41848+#endif
41849+
41850+#ifdef CONFIG_PAX_EMUTRAMP
41851+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
41852+ pax_flags |= MF_PAX_EMUTRAMP;
41853+#endif
41854+
41855+#ifdef CONFIG_PAX_MPROTECT
41856+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
41857+ pax_flags |= MF_PAX_MPROTECT;
41858+#endif
41859+
41860+#ifdef CONFIG_PAX_ASLR
41861+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
41862+ pax_flags |= MF_PAX_RANDMMAP;
41863+#endif
41864+
41865+#else
41866+
41867+#ifdef CONFIG_PAX_PAGEEXEC
41868+ pax_flags |= MF_PAX_PAGEEXEC;
41869+#endif
41870+
41871+#ifdef CONFIG_PAX_MPROTECT
41872+ pax_flags |= MF_PAX_MPROTECT;
41873+#endif
41874+
41875+#ifdef CONFIG_PAX_RANDMMAP
41876+ pax_flags |= MF_PAX_RANDMMAP;
41877+#endif
41878+
41879+#ifdef CONFIG_PAX_SEGMEXEC
41880+ if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
41881+ pax_flags &= ~MF_PAX_PAGEEXEC;
41882+ pax_flags |= MF_PAX_SEGMEXEC;
41883+ }
41884+#endif
41885+
41886+#endif
41887+
41888+ return pax_flags;
41889+}
41890+
41891+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
41892+{
41893+
41894+#ifdef CONFIG_PAX_PT_PAX_FLAGS
41895+ unsigned long i;
41896+
41897+ for (i = 0UL; i < elf_ex->e_phnum; i++)
41898+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
41899+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
41900+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
41901+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
41902+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
41903+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
41904+ return ~0UL;
41905+
41906+#ifdef CONFIG_PAX_SOFTMODE
41907+ if (pax_softmode)
41908+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
41909+ else
41910+#endif
41911+
41912+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
41913+ break;
41914+ }
41915+#endif
41916+
41917+ return ~0UL;
41918+}
41919+
41920+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
41921+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
41922+{
41923+ unsigned long pax_flags = 0UL;
41924+
41925+#ifdef CONFIG_PAX_PAGEEXEC
41926+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
41927+ pax_flags |= MF_PAX_PAGEEXEC;
41928+#endif
41929+
41930+#ifdef CONFIG_PAX_SEGMEXEC
41931+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
41932+ pax_flags |= MF_PAX_SEGMEXEC;
41933+#endif
41934+
41935+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41936+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41937+ if ((__supported_pte_mask & _PAGE_NX))
41938+ pax_flags &= ~MF_PAX_SEGMEXEC;
41939+ else
41940+ pax_flags &= ~MF_PAX_PAGEEXEC;
41941+ }
41942+#endif
41943+
41944+#ifdef CONFIG_PAX_EMUTRAMP
41945+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
41946+ pax_flags |= MF_PAX_EMUTRAMP;
41947+#endif
41948+
41949+#ifdef CONFIG_PAX_MPROTECT
41950+ if (pax_flags_softmode & MF_PAX_MPROTECT)
41951+ pax_flags |= MF_PAX_MPROTECT;
41952+#endif
41953+
41954+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41955+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
41956+ pax_flags |= MF_PAX_RANDMMAP;
41957+#endif
41958+
41959+ return pax_flags;
41960+}
41961+
41962+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
41963+{
41964+ unsigned long pax_flags = 0UL;
41965+
41966+#ifdef CONFIG_PAX_PAGEEXEC
41967+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
41968+ pax_flags |= MF_PAX_PAGEEXEC;
41969+#endif
41970+
41971+#ifdef CONFIG_PAX_SEGMEXEC
41972+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
41973+ pax_flags |= MF_PAX_SEGMEXEC;
41974+#endif
41975+
41976+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41977+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41978+ if ((__supported_pte_mask & _PAGE_NX))
41979+ pax_flags &= ~MF_PAX_SEGMEXEC;
41980+ else
41981+ pax_flags &= ~MF_PAX_PAGEEXEC;
41982+ }
41983+#endif
41984+
41985+#ifdef CONFIG_PAX_EMUTRAMP
41986+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
41987+ pax_flags |= MF_PAX_EMUTRAMP;
41988+#endif
41989+
41990+#ifdef CONFIG_PAX_MPROTECT
41991+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
41992+ pax_flags |= MF_PAX_MPROTECT;
41993+#endif
41994+
41995+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41996+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
41997+ pax_flags |= MF_PAX_RANDMMAP;
41998+#endif
41999+
42000+ return pax_flags;
42001+}
42002+#endif
42003+
42004+static unsigned long pax_parse_xattr_pax(struct file * const file)
42005+{
42006+
42007+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
42008+ ssize_t xattr_size, i;
42009+ unsigned char xattr_value[5];
42010+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
42011+
42012+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
42013+ if (xattr_size <= 0)
42014+ return ~0UL;
42015+
42016+ for (i = 0; i < xattr_size; i++)
42017+ switch (xattr_value[i]) {
42018+ default:
42019+ return ~0UL;
42020+
42021+#define parse_flag(option1, option2, flag) \
42022+ case option1: \
42023+ pax_flags_hardmode |= MF_PAX_##flag; \
42024+ break; \
42025+ case option2: \
42026+ pax_flags_softmode |= MF_PAX_##flag; \
42027+ break;
42028+
42029+ parse_flag('p', 'P', PAGEEXEC);
42030+ parse_flag('e', 'E', EMUTRAMP);
42031+ parse_flag('m', 'M', MPROTECT);
42032+ parse_flag('r', 'R', RANDMMAP);
42033+ parse_flag('s', 'S', SEGMEXEC);
42034+
42035+#undef parse_flag
42036+ }
42037+
42038+ if (pax_flags_hardmode & pax_flags_softmode)
42039+ return ~0UL;
42040+
42041+#ifdef CONFIG_PAX_SOFTMODE
42042+ if (pax_softmode)
42043+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
42044+ else
42045+#endif
42046+
42047+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
42048+#else
42049+ return ~0UL;
42050+#endif
42051+
42052+}
42053+
42054+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
42055+{
42056+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
42057+
42058+ pax_flags = pax_parse_ei_pax(elf_ex);
42059+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
42060+ xattr_pax_flags = pax_parse_xattr_pax(file);
42061+
42062+ if (pt_pax_flags == ~0UL)
42063+ pt_pax_flags = xattr_pax_flags;
42064+ else if (xattr_pax_flags == ~0UL)
42065+ xattr_pax_flags = pt_pax_flags;
42066+ if (pt_pax_flags != xattr_pax_flags)
42067+ return -EINVAL;
42068+ if (pt_pax_flags != ~0UL)
42069+ pax_flags = pt_pax_flags;
42070+
42071+ if (0 > pax_check_flags(&pax_flags))
42072+ return -EINVAL;
42073+
42074+ current->mm->pax_flags = pax_flags;
42075+ return 0;
42076+}
42077+#endif
42078+
42079 /*
42080 * These are the functions used to load ELF style executables and shared
42081 * libraries. There is no binary dependent code anywhere else.
42082@@ -541,6 +910,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
42083 {
42084 unsigned int random_variable = 0;
42085
42086+#ifdef CONFIG_PAX_RANDUSTACK
42087+ if (randomize_va_space)
42088+ return stack_top - current->mm->delta_stack;
42089+#endif
42090+
42091 if ((current->flags & PF_RANDOMIZE) &&
42092 !(current->personality & ADDR_NO_RANDOMIZE)) {
42093 random_variable = get_random_int() & STACK_RND_MASK;
42094@@ -559,7 +933,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42095 unsigned long load_addr = 0, load_bias = 0;
42096 int load_addr_set = 0;
42097 char * elf_interpreter = NULL;
42098- unsigned long error;
42099+ unsigned long error = 0;
42100 struct elf_phdr *elf_ppnt, *elf_phdata;
42101 unsigned long elf_bss, elf_brk;
42102 int retval, i;
42103@@ -569,11 +943,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42104 unsigned long start_code, end_code, start_data, end_data;
42105 unsigned long reloc_func_desc __maybe_unused = 0;
42106 int executable_stack = EXSTACK_DEFAULT;
42107- unsigned long def_flags = 0;
42108 struct {
42109 struct elfhdr elf_ex;
42110 struct elfhdr interp_elf_ex;
42111 } *loc;
42112+ unsigned long pax_task_size = TASK_SIZE;
42113
42114 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
42115 if (!loc) {
42116@@ -709,11 +1083,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42117 goto out_free_dentry;
42118
42119 /* OK, This is the point of no return */
42120- current->mm->def_flags = def_flags;
42121+
42122+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42123+ current->mm->pax_flags = 0UL;
42124+#endif
42125+
42126+#ifdef CONFIG_PAX_DLRESOLVE
42127+ current->mm->call_dl_resolve = 0UL;
42128+#endif
42129+
42130+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
42131+ current->mm->call_syscall = 0UL;
42132+#endif
42133+
42134+#ifdef CONFIG_PAX_ASLR
42135+ current->mm->delta_mmap = 0UL;
42136+ current->mm->delta_stack = 0UL;
42137+#endif
42138+
42139+ current->mm->def_flags = 0;
42140+
42141+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
42142+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
42143+ send_sig(SIGKILL, current, 0);
42144+ goto out_free_dentry;
42145+ }
42146+#endif
42147+
42148+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
42149+ pax_set_initial_flags(bprm);
42150+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
42151+ if (pax_set_initial_flags_func)
42152+ (pax_set_initial_flags_func)(bprm);
42153+#endif
42154+
42155+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42156+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
42157+ current->mm->context.user_cs_limit = PAGE_SIZE;
42158+ current->mm->def_flags |= VM_PAGEEXEC;
42159+ }
42160+#endif
42161+
42162+#ifdef CONFIG_PAX_SEGMEXEC
42163+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
42164+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
42165+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
42166+ pax_task_size = SEGMEXEC_TASK_SIZE;
42167+ current->mm->def_flags |= VM_NOHUGEPAGE;
42168+ }
42169+#endif
42170+
42171+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
42172+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42173+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
42174+ put_cpu();
42175+ }
42176+#endif
42177
42178 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
42179 may depend on the personality. */
42180 SET_PERSONALITY(loc->elf_ex);
42181+
42182+#ifdef CONFIG_PAX_ASLR
42183+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42184+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
42185+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
42186+ }
42187+#endif
42188+
42189+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42190+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42191+ executable_stack = EXSTACK_DISABLE_X;
42192+ current->personality &= ~READ_IMPLIES_EXEC;
42193+ } else
42194+#endif
42195+
42196 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
42197 current->personality |= READ_IMPLIES_EXEC;
42198
42199@@ -804,6 +1248,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42200 #else
42201 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
42202 #endif
42203+
42204+#ifdef CONFIG_PAX_RANDMMAP
42205+ /* PaX: randomize base address at the default exe base if requested */
42206+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
42207+#ifdef CONFIG_SPARC64
42208+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
42209+#else
42210+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
42211+#endif
42212+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
42213+ elf_flags |= MAP_FIXED;
42214+ }
42215+#endif
42216+
42217 }
42218
42219 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
42220@@ -836,9 +1294,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42221 * allowed task size. Note that p_filesz must always be
42222 * <= p_memsz so it is only necessary to check p_memsz.
42223 */
42224- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42225- elf_ppnt->p_memsz > TASK_SIZE ||
42226- TASK_SIZE - elf_ppnt->p_memsz < k) {
42227+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42228+ elf_ppnt->p_memsz > pax_task_size ||
42229+ pax_task_size - elf_ppnt->p_memsz < k) {
42230 /* set_brk can never work. Avoid overflows. */
42231 send_sig(SIGKILL, current, 0);
42232 retval = -EINVAL;
42233@@ -877,11 +1335,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42234 goto out_free_dentry;
42235 }
42236 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
42237- send_sig(SIGSEGV, current, 0);
42238- retval = -EFAULT; /* Nobody gets to see this, but.. */
42239- goto out_free_dentry;
42240+ /*
42241+ * This bss-zeroing can fail if the ELF
42242+ * file specifies odd protections. So
42243+ * we don't check the return value
42244+ */
42245 }
42246
42247+#ifdef CONFIG_PAX_RANDMMAP
42248+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42249+ unsigned long start, size;
42250+
42251+ start = ELF_PAGEALIGN(elf_brk);
42252+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
42253+ down_write(&current->mm->mmap_sem);
42254+ retval = -ENOMEM;
42255+ if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
42256+ unsigned long prot = PROT_NONE;
42257+
42258+ current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
42259+// if (current->personality & ADDR_NO_RANDOMIZE)
42260+// prot = PROT_READ;
42261+ start = do_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
42262+ retval = IS_ERR_VALUE(start) ? start : 0;
42263+ }
42264+ up_write(&current->mm->mmap_sem);
42265+ if (retval == 0)
42266+ retval = set_brk(start + size, start + size + PAGE_SIZE);
42267+ if (retval < 0) {
42268+ send_sig(SIGKILL, current, 0);
42269+ goto out_free_dentry;
42270+ }
42271+ }
42272+#endif
42273+
42274 if (elf_interpreter) {
42275 unsigned long uninitialized_var(interp_map_addr);
42276
42277@@ -1109,7 +1596,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
42278 * Decide what to dump of a segment, part, all or none.
42279 */
42280 static unsigned long vma_dump_size(struct vm_area_struct *vma,
42281- unsigned long mm_flags)
42282+ unsigned long mm_flags, long signr)
42283 {
42284 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
42285
42286@@ -1146,7 +1633,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
42287 if (vma->vm_file == NULL)
42288 return 0;
42289
42290- if (FILTER(MAPPED_PRIVATE))
42291+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
42292 goto whole;
42293
42294 /*
42295@@ -1368,9 +1855,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
42296 {
42297 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
42298 int i = 0;
42299- do
42300+ do {
42301 i += 2;
42302- while (auxv[i - 2] != AT_NULL);
42303+ } while (auxv[i - 2] != AT_NULL);
42304 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
42305 }
42306
42307@@ -1892,14 +2379,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
42308 }
42309
42310 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
42311- unsigned long mm_flags)
42312+ struct coredump_params *cprm)
42313 {
42314 struct vm_area_struct *vma;
42315 size_t size = 0;
42316
42317 for (vma = first_vma(current, gate_vma); vma != NULL;
42318 vma = next_vma(vma, gate_vma))
42319- size += vma_dump_size(vma, mm_flags);
42320+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42321 return size;
42322 }
42323
42324@@ -1993,7 +2480,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42325
42326 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
42327
42328- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
42329+ offset += elf_core_vma_data_size(gate_vma, cprm);
42330 offset += elf_core_extra_data_size();
42331 e_shoff = offset;
42332
42333@@ -2007,10 +2494,12 @@ static int elf_core_dump(struct coredump_params *cprm)
42334 offset = dataoff;
42335
42336 size += sizeof(*elf);
42337+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42338 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
42339 goto end_coredump;
42340
42341 size += sizeof(*phdr4note);
42342+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42343 if (size > cprm->limit
42344 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
42345 goto end_coredump;
42346@@ -2024,7 +2513,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42347 phdr.p_offset = offset;
42348 phdr.p_vaddr = vma->vm_start;
42349 phdr.p_paddr = 0;
42350- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
42351+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42352 phdr.p_memsz = vma->vm_end - vma->vm_start;
42353 offset += phdr.p_filesz;
42354 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
42355@@ -2035,6 +2524,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42356 phdr.p_align = ELF_EXEC_PAGESIZE;
42357
42358 size += sizeof(phdr);
42359+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42360 if (size > cprm->limit
42361 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
42362 goto end_coredump;
42363@@ -2059,7 +2549,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42364 unsigned long addr;
42365 unsigned long end;
42366
42367- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
42368+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42369
42370 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
42371 struct page *page;
42372@@ -2068,6 +2558,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42373 page = get_dump_page(addr);
42374 if (page) {
42375 void *kaddr = kmap(page);
42376+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
42377 stop = ((size += PAGE_SIZE) > cprm->limit) ||
42378 !dump_write(cprm->file, kaddr,
42379 PAGE_SIZE);
42380@@ -2085,6 +2576,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42381
42382 if (e_phnum == PN_XNUM) {
42383 size += sizeof(*shdr4extnum);
42384+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42385 if (size > cprm->limit
42386 || !dump_write(cprm->file, shdr4extnum,
42387 sizeof(*shdr4extnum)))
42388@@ -2105,6 +2597,97 @@ out:
42389
42390 #endif /* CONFIG_ELF_CORE */
42391
42392+#ifdef CONFIG_PAX_MPROTECT
42393+/* PaX: non-PIC ELF libraries need relocations on their executable segments
42394+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
42395+ * we'll remove VM_MAYWRITE for good on RELRO segments.
42396+ *
42397+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
42398+ * basis because we want to allow the common case and not the special ones.
42399+ */
42400+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
42401+{
42402+ struct elfhdr elf_h;
42403+ struct elf_phdr elf_p;
42404+ unsigned long i;
42405+ unsigned long oldflags;
42406+ bool is_textrel_rw, is_textrel_rx, is_relro;
42407+
42408+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
42409+ return;
42410+
42411+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
42412+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
42413+
42414+#ifdef CONFIG_PAX_ELFRELOCS
42415+ /* possible TEXTREL */
42416+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
42417+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
42418+#else
42419+ is_textrel_rw = false;
42420+ is_textrel_rx = false;
42421+#endif
42422+
42423+ /* possible RELRO */
42424+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
42425+
42426+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
42427+ return;
42428+
42429+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
42430+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
42431+
42432+#ifdef CONFIG_PAX_ETEXECRELOCS
42433+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42434+#else
42435+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
42436+#endif
42437+
42438+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42439+ !elf_check_arch(&elf_h) ||
42440+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
42441+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
42442+ return;
42443+
42444+ for (i = 0UL; i < elf_h.e_phnum; i++) {
42445+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
42446+ return;
42447+ switch (elf_p.p_type) {
42448+ case PT_DYNAMIC:
42449+ if (!is_textrel_rw && !is_textrel_rx)
42450+ continue;
42451+ i = 0UL;
42452+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
42453+ elf_dyn dyn;
42454+
42455+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
42456+ return;
42457+ if (dyn.d_tag == DT_NULL)
42458+ return;
42459+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
42460+ gr_log_textrel(vma);
42461+ if (is_textrel_rw)
42462+ vma->vm_flags |= VM_MAYWRITE;
42463+ else
42464+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
42465+ vma->vm_flags &= ~VM_MAYWRITE;
42466+ return;
42467+ }
42468+ i++;
42469+ }
42470+ return;
42471+
42472+ case PT_GNU_RELRO:
42473+ if (!is_relro)
42474+ continue;
42475+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
42476+ vma->vm_flags &= ~VM_MAYWRITE;
42477+ return;
42478+ }
42479+ }
42480+}
42481+#endif
42482+
42483 static int __init init_elf_binfmt(void)
42484 {
42485 register_binfmt(&elf_format);
42486diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
42487index 6b2daf9..a70dccb 100644
42488--- a/fs/binfmt_flat.c
42489+++ b/fs/binfmt_flat.c
42490@@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
42491 realdatastart = (unsigned long) -ENOMEM;
42492 printk("Unable to allocate RAM for process data, errno %d\n",
42493 (int)-realdatastart);
42494+ down_write(&current->mm->mmap_sem);
42495 do_munmap(current->mm, textpos, text_len);
42496+ up_write(&current->mm->mmap_sem);
42497 ret = realdatastart;
42498 goto err;
42499 }
42500@@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42501 }
42502 if (IS_ERR_VALUE(result)) {
42503 printk("Unable to read data+bss, errno %d\n", (int)-result);
42504+ down_write(&current->mm->mmap_sem);
42505 do_munmap(current->mm, textpos, text_len);
42506 do_munmap(current->mm, realdatastart, len);
42507+ up_write(&current->mm->mmap_sem);
42508 ret = result;
42509 goto err;
42510 }
42511@@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42512 }
42513 if (IS_ERR_VALUE(result)) {
42514 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
42515+ down_write(&current->mm->mmap_sem);
42516 do_munmap(current->mm, textpos, text_len + data_len + extra +
42517 MAX_SHARED_LIBS * sizeof(unsigned long));
42518+ up_write(&current->mm->mmap_sem);
42519 ret = result;
42520 goto err;
42521 }
42522diff --git a/fs/bio.c b/fs/bio.c
42523index 84da885..2149cd9 100644
42524--- a/fs/bio.c
42525+++ b/fs/bio.c
42526@@ -838,7 +838,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
42527 /*
42528 * Overflow, abort
42529 */
42530- if (end < start)
42531+ if (end < start || end - start > INT_MAX - nr_pages)
42532 return ERR_PTR(-EINVAL);
42533
42534 nr_pages += end - start;
42535@@ -1234,7 +1234,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
42536 const int read = bio_data_dir(bio) == READ;
42537 struct bio_map_data *bmd = bio->bi_private;
42538 int i;
42539- char *p = bmd->sgvecs[0].iov_base;
42540+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
42541
42542 __bio_for_each_segment(bvec, bio, i, 0) {
42543 char *addr = page_address(bvec->bv_page);
42544diff --git a/fs/block_dev.c b/fs/block_dev.c
42545index ba11c30..623d736 100644
42546--- a/fs/block_dev.c
42547+++ b/fs/block_dev.c
42548@@ -704,7 +704,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
42549 else if (bdev->bd_contains == bdev)
42550 return true; /* is a whole device which isn't held */
42551
42552- else if (whole->bd_holder == bd_may_claim)
42553+ else if (whole->bd_holder == (void *)bd_may_claim)
42554 return true; /* is a partition of a device that is being partitioned */
42555 else if (whole->bd_holder != NULL)
42556 return false; /* is a partition of a held device */
42557diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
42558index c053e90..e5f1afc 100644
42559--- a/fs/btrfs/check-integrity.c
42560+++ b/fs/btrfs/check-integrity.c
42561@@ -156,7 +156,7 @@ struct btrfsic_block {
42562 union {
42563 bio_end_io_t *bio;
42564 bh_end_io_t *bh;
42565- } orig_bio_bh_end_io;
42566+ } __no_const orig_bio_bh_end_io;
42567 int submit_bio_bh_rw;
42568 u64 flush_gen; /* only valid if !never_written */
42569 };
42570diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
42571index 4106264..8157ede 100644
42572--- a/fs/btrfs/ctree.c
42573+++ b/fs/btrfs/ctree.c
42574@@ -513,9 +513,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
42575 free_extent_buffer(buf);
42576 add_root_to_dirty_list(root);
42577 } else {
42578- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
42579- parent_start = parent->start;
42580- else
42581+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
42582+ if (parent)
42583+ parent_start = parent->start;
42584+ else
42585+ parent_start = 0;
42586+ } else
42587 parent_start = 0;
42588
42589 WARN_ON(trans->transid != btrfs_header_generation(parent));
42590diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
42591index 0df0d1f..4bdcbfe 100644
42592--- a/fs/btrfs/inode.c
42593+++ b/fs/btrfs/inode.c
42594@@ -7074,7 +7074,7 @@ fail:
42595 return -ENOMEM;
42596 }
42597
42598-static int btrfs_getattr(struct vfsmount *mnt,
42599+int btrfs_getattr(struct vfsmount *mnt,
42600 struct dentry *dentry, struct kstat *stat)
42601 {
42602 struct inode *inode = dentry->d_inode;
42603@@ -7088,6 +7088,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
42604 return 0;
42605 }
42606
42607+EXPORT_SYMBOL(btrfs_getattr);
42608+
42609+dev_t get_btrfs_dev_from_inode(struct inode *inode)
42610+{
42611+ return BTRFS_I(inode)->root->anon_dev;
42612+}
42613+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
42614+
42615 /*
42616 * If a file is moved, it will inherit the cow and compression flags of the new
42617 * directory.
42618diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
42619index 14f8e1f..ab8d81f 100644
42620--- a/fs/btrfs/ioctl.c
42621+++ b/fs/btrfs/ioctl.c
42622@@ -2882,9 +2882,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42623 for (i = 0; i < num_types; i++) {
42624 struct btrfs_space_info *tmp;
42625
42626+ /* Don't copy in more than we allocated */
42627 if (!slot_count)
42628 break;
42629
42630+ slot_count--;
42631+
42632 info = NULL;
42633 rcu_read_lock();
42634 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
42635@@ -2906,15 +2909,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42636 memcpy(dest, &space, sizeof(space));
42637 dest++;
42638 space_args.total_spaces++;
42639- slot_count--;
42640 }
42641- if (!slot_count)
42642- break;
42643 }
42644 up_read(&info->groups_sem);
42645 }
42646
42647- user_dest = (struct btrfs_ioctl_space_info *)
42648+ user_dest = (struct btrfs_ioctl_space_info __user *)
42649 (arg + sizeof(struct btrfs_ioctl_space_args));
42650
42651 if (copy_to_user(user_dest, dest_orig, alloc_size))
42652diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
42653index 646ee21..f020f87 100644
42654--- a/fs/btrfs/relocation.c
42655+++ b/fs/btrfs/relocation.c
42656@@ -1268,7 +1268,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
42657 }
42658 spin_unlock(&rc->reloc_root_tree.lock);
42659
42660- BUG_ON((struct btrfs_root *)node->data != root);
42661+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
42662
42663 if (!del) {
42664 spin_lock(&rc->reloc_root_tree.lock);
42665diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
42666index 622f469..e8d2d55 100644
42667--- a/fs/cachefiles/bind.c
42668+++ b/fs/cachefiles/bind.c
42669@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
42670 args);
42671
42672 /* start by checking things over */
42673- ASSERT(cache->fstop_percent >= 0 &&
42674- cache->fstop_percent < cache->fcull_percent &&
42675+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
42676 cache->fcull_percent < cache->frun_percent &&
42677 cache->frun_percent < 100);
42678
42679- ASSERT(cache->bstop_percent >= 0 &&
42680- cache->bstop_percent < cache->bcull_percent &&
42681+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
42682 cache->bcull_percent < cache->brun_percent &&
42683 cache->brun_percent < 100);
42684
42685diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
42686index 0a1467b..6a53245 100644
42687--- a/fs/cachefiles/daemon.c
42688+++ b/fs/cachefiles/daemon.c
42689@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
42690 if (n > buflen)
42691 return -EMSGSIZE;
42692
42693- if (copy_to_user(_buffer, buffer, n) != 0)
42694+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
42695 return -EFAULT;
42696
42697 return n;
42698@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
42699 if (test_bit(CACHEFILES_DEAD, &cache->flags))
42700 return -EIO;
42701
42702- if (datalen < 0 || datalen > PAGE_SIZE - 1)
42703+ if (datalen > PAGE_SIZE - 1)
42704 return -EOPNOTSUPP;
42705
42706 /* drag the command string into the kernel so we can parse it */
42707@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
42708 if (args[0] != '%' || args[1] != '\0')
42709 return -EINVAL;
42710
42711- if (fstop < 0 || fstop >= cache->fcull_percent)
42712+ if (fstop >= cache->fcull_percent)
42713 return cachefiles_daemon_range_error(cache, args);
42714
42715 cache->fstop_percent = fstop;
42716@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
42717 if (args[0] != '%' || args[1] != '\0')
42718 return -EINVAL;
42719
42720- if (bstop < 0 || bstop >= cache->bcull_percent)
42721+ if (bstop >= cache->bcull_percent)
42722 return cachefiles_daemon_range_error(cache, args);
42723
42724 cache->bstop_percent = bstop;
42725diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
42726index bd6bc1b..b627b53 100644
42727--- a/fs/cachefiles/internal.h
42728+++ b/fs/cachefiles/internal.h
42729@@ -57,7 +57,7 @@ struct cachefiles_cache {
42730 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
42731 struct rb_root active_nodes; /* active nodes (can't be culled) */
42732 rwlock_t active_lock; /* lock for active_nodes */
42733- atomic_t gravecounter; /* graveyard uniquifier */
42734+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
42735 unsigned frun_percent; /* when to stop culling (% files) */
42736 unsigned fcull_percent; /* when to start culling (% files) */
42737 unsigned fstop_percent; /* when to stop allocating (% files) */
42738@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
42739 * proc.c
42740 */
42741 #ifdef CONFIG_CACHEFILES_HISTOGRAM
42742-extern atomic_t cachefiles_lookup_histogram[HZ];
42743-extern atomic_t cachefiles_mkdir_histogram[HZ];
42744-extern atomic_t cachefiles_create_histogram[HZ];
42745+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42746+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42747+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
42748
42749 extern int __init cachefiles_proc_init(void);
42750 extern void cachefiles_proc_cleanup(void);
42751 static inline
42752-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
42753+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
42754 {
42755 unsigned long jif = jiffies - start_jif;
42756 if (jif >= HZ)
42757 jif = HZ - 1;
42758- atomic_inc(&histogram[jif]);
42759+ atomic_inc_unchecked(&histogram[jif]);
42760 }
42761
42762 #else
42763diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
42764index 7f0771d..87d4f36 100644
42765--- a/fs/cachefiles/namei.c
42766+++ b/fs/cachefiles/namei.c
42767@@ -318,7 +318,7 @@ try_again:
42768 /* first step is to make up a grave dentry in the graveyard */
42769 sprintf(nbuffer, "%08x%08x",
42770 (uint32_t) get_seconds(),
42771- (uint32_t) atomic_inc_return(&cache->gravecounter));
42772+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
42773
42774 /* do the multiway lock magic */
42775 trap = lock_rename(cache->graveyard, dir);
42776diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
42777index eccd339..4c1d995 100644
42778--- a/fs/cachefiles/proc.c
42779+++ b/fs/cachefiles/proc.c
42780@@ -14,9 +14,9 @@
42781 #include <linux/seq_file.h>
42782 #include "internal.h"
42783
42784-atomic_t cachefiles_lookup_histogram[HZ];
42785-atomic_t cachefiles_mkdir_histogram[HZ];
42786-atomic_t cachefiles_create_histogram[HZ];
42787+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42788+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42789+atomic_unchecked_t cachefiles_create_histogram[HZ];
42790
42791 /*
42792 * display the latency histogram
42793@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
42794 return 0;
42795 default:
42796 index = (unsigned long) v - 3;
42797- x = atomic_read(&cachefiles_lookup_histogram[index]);
42798- y = atomic_read(&cachefiles_mkdir_histogram[index]);
42799- z = atomic_read(&cachefiles_create_histogram[index]);
42800+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
42801+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
42802+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
42803 if (x == 0 && y == 0 && z == 0)
42804 return 0;
42805
42806diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
42807index 0e3c092..818480e 100644
42808--- a/fs/cachefiles/rdwr.c
42809+++ b/fs/cachefiles/rdwr.c
42810@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
42811 old_fs = get_fs();
42812 set_fs(KERNEL_DS);
42813 ret = file->f_op->write(
42814- file, (const void __user *) data, len, &pos);
42815+ file, (const void __force_user *) data, len, &pos);
42816 set_fs(old_fs);
42817 kunmap(page);
42818 if (ret != len)
42819diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
42820index 3e8094b..cb3ff3d 100644
42821--- a/fs/ceph/dir.c
42822+++ b/fs/ceph/dir.c
42823@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
42824 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
42825 struct ceph_mds_client *mdsc = fsc->mdsc;
42826 unsigned frag = fpos_frag(filp->f_pos);
42827- int off = fpos_off(filp->f_pos);
42828+ unsigned int off = fpos_off(filp->f_pos);
42829 int err;
42830 u32 ftype;
42831 struct ceph_mds_reply_info_parsed *rinfo;
42832@@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
42833 if (nd &&
42834 (nd->flags & LOOKUP_OPEN) &&
42835 !(nd->intent.open.flags & O_CREAT)) {
42836- int mode = nd->intent.open.create_mode & ~current->fs->umask;
42837+ int mode = nd->intent.open.create_mode & ~current_umask();
42838 return ceph_lookup_open(dir, dentry, nd, mode, 1);
42839 }
42840
42841diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
42842index 2704646..c581c91 100644
42843--- a/fs/cifs/cifs_debug.c
42844+++ b/fs/cifs/cifs_debug.c
42845@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42846
42847 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
42848 #ifdef CONFIG_CIFS_STATS2
42849- atomic_set(&totBufAllocCount, 0);
42850- atomic_set(&totSmBufAllocCount, 0);
42851+ atomic_set_unchecked(&totBufAllocCount, 0);
42852+ atomic_set_unchecked(&totSmBufAllocCount, 0);
42853 #endif /* CONFIG_CIFS_STATS2 */
42854 spin_lock(&cifs_tcp_ses_lock);
42855 list_for_each(tmp1, &cifs_tcp_ses_list) {
42856@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42857 tcon = list_entry(tmp3,
42858 struct cifs_tcon,
42859 tcon_list);
42860- atomic_set(&tcon->num_smbs_sent, 0);
42861- atomic_set(&tcon->num_writes, 0);
42862- atomic_set(&tcon->num_reads, 0);
42863- atomic_set(&tcon->num_oplock_brks, 0);
42864- atomic_set(&tcon->num_opens, 0);
42865- atomic_set(&tcon->num_posixopens, 0);
42866- atomic_set(&tcon->num_posixmkdirs, 0);
42867- atomic_set(&tcon->num_closes, 0);
42868- atomic_set(&tcon->num_deletes, 0);
42869- atomic_set(&tcon->num_mkdirs, 0);
42870- atomic_set(&tcon->num_rmdirs, 0);
42871- atomic_set(&tcon->num_renames, 0);
42872- atomic_set(&tcon->num_t2renames, 0);
42873- atomic_set(&tcon->num_ffirst, 0);
42874- atomic_set(&tcon->num_fnext, 0);
42875- atomic_set(&tcon->num_fclose, 0);
42876- atomic_set(&tcon->num_hardlinks, 0);
42877- atomic_set(&tcon->num_symlinks, 0);
42878- atomic_set(&tcon->num_locks, 0);
42879+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
42880+ atomic_set_unchecked(&tcon->num_writes, 0);
42881+ atomic_set_unchecked(&tcon->num_reads, 0);
42882+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
42883+ atomic_set_unchecked(&tcon->num_opens, 0);
42884+ atomic_set_unchecked(&tcon->num_posixopens, 0);
42885+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
42886+ atomic_set_unchecked(&tcon->num_closes, 0);
42887+ atomic_set_unchecked(&tcon->num_deletes, 0);
42888+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
42889+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
42890+ atomic_set_unchecked(&tcon->num_renames, 0);
42891+ atomic_set_unchecked(&tcon->num_t2renames, 0);
42892+ atomic_set_unchecked(&tcon->num_ffirst, 0);
42893+ atomic_set_unchecked(&tcon->num_fnext, 0);
42894+ atomic_set_unchecked(&tcon->num_fclose, 0);
42895+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
42896+ atomic_set_unchecked(&tcon->num_symlinks, 0);
42897+ atomic_set_unchecked(&tcon->num_locks, 0);
42898 }
42899 }
42900 }
42901@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42902 smBufAllocCount.counter, cifs_min_small);
42903 #ifdef CONFIG_CIFS_STATS2
42904 seq_printf(m, "Total Large %d Small %d Allocations\n",
42905- atomic_read(&totBufAllocCount),
42906- atomic_read(&totSmBufAllocCount));
42907+ atomic_read_unchecked(&totBufAllocCount),
42908+ atomic_read_unchecked(&totSmBufAllocCount));
42909 #endif /* CONFIG_CIFS_STATS2 */
42910
42911 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
42912@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42913 if (tcon->need_reconnect)
42914 seq_puts(m, "\tDISCONNECTED ");
42915 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
42916- atomic_read(&tcon->num_smbs_sent),
42917- atomic_read(&tcon->num_oplock_brks));
42918+ atomic_read_unchecked(&tcon->num_smbs_sent),
42919+ atomic_read_unchecked(&tcon->num_oplock_brks));
42920 seq_printf(m, "\nReads: %d Bytes: %lld",
42921- atomic_read(&tcon->num_reads),
42922+ atomic_read_unchecked(&tcon->num_reads),
42923 (long long)(tcon->bytes_read));
42924 seq_printf(m, "\nWrites: %d Bytes: %lld",
42925- atomic_read(&tcon->num_writes),
42926+ atomic_read_unchecked(&tcon->num_writes),
42927 (long long)(tcon->bytes_written));
42928 seq_printf(m, "\nFlushes: %d",
42929- atomic_read(&tcon->num_flushes));
42930+ atomic_read_unchecked(&tcon->num_flushes));
42931 seq_printf(m, "\nLocks: %d HardLinks: %d "
42932 "Symlinks: %d",
42933- atomic_read(&tcon->num_locks),
42934- atomic_read(&tcon->num_hardlinks),
42935- atomic_read(&tcon->num_symlinks));
42936+ atomic_read_unchecked(&tcon->num_locks),
42937+ atomic_read_unchecked(&tcon->num_hardlinks),
42938+ atomic_read_unchecked(&tcon->num_symlinks));
42939 seq_printf(m, "\nOpens: %d Closes: %d "
42940 "Deletes: %d",
42941- atomic_read(&tcon->num_opens),
42942- atomic_read(&tcon->num_closes),
42943- atomic_read(&tcon->num_deletes));
42944+ atomic_read_unchecked(&tcon->num_opens),
42945+ atomic_read_unchecked(&tcon->num_closes),
42946+ atomic_read_unchecked(&tcon->num_deletes));
42947 seq_printf(m, "\nPosix Opens: %d "
42948 "Posix Mkdirs: %d",
42949- atomic_read(&tcon->num_posixopens),
42950- atomic_read(&tcon->num_posixmkdirs));
42951+ atomic_read_unchecked(&tcon->num_posixopens),
42952+ atomic_read_unchecked(&tcon->num_posixmkdirs));
42953 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
42954- atomic_read(&tcon->num_mkdirs),
42955- atomic_read(&tcon->num_rmdirs));
42956+ atomic_read_unchecked(&tcon->num_mkdirs),
42957+ atomic_read_unchecked(&tcon->num_rmdirs));
42958 seq_printf(m, "\nRenames: %d T2 Renames %d",
42959- atomic_read(&tcon->num_renames),
42960- atomic_read(&tcon->num_t2renames));
42961+ atomic_read_unchecked(&tcon->num_renames),
42962+ atomic_read_unchecked(&tcon->num_t2renames));
42963 seq_printf(m, "\nFindFirst: %d FNext %d "
42964 "FClose %d",
42965- atomic_read(&tcon->num_ffirst),
42966- atomic_read(&tcon->num_fnext),
42967- atomic_read(&tcon->num_fclose));
42968+ atomic_read_unchecked(&tcon->num_ffirst),
42969+ atomic_read_unchecked(&tcon->num_fnext),
42970+ atomic_read_unchecked(&tcon->num_fclose));
42971 }
42972 }
42973 }
42974diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
42975index 541ef81..a78deb8 100644
42976--- a/fs/cifs/cifsfs.c
42977+++ b/fs/cifs/cifsfs.c
42978@@ -985,7 +985,7 @@ cifs_init_request_bufs(void)
42979 cifs_req_cachep = kmem_cache_create("cifs_request",
42980 CIFSMaxBufSize +
42981 MAX_CIFS_HDR_SIZE, 0,
42982- SLAB_HWCACHE_ALIGN, NULL);
42983+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
42984 if (cifs_req_cachep == NULL)
42985 return -ENOMEM;
42986
42987@@ -1012,7 +1012,7 @@ cifs_init_request_bufs(void)
42988 efficient to alloc 1 per page off the slab compared to 17K (5page)
42989 alloc of large cifs buffers even when page debugging is on */
42990 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
42991- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
42992+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
42993 NULL);
42994 if (cifs_sm_req_cachep == NULL) {
42995 mempool_destroy(cifs_req_poolp);
42996@@ -1097,8 +1097,8 @@ init_cifs(void)
42997 atomic_set(&bufAllocCount, 0);
42998 atomic_set(&smBufAllocCount, 0);
42999 #ifdef CONFIG_CIFS_STATS2
43000- atomic_set(&totBufAllocCount, 0);
43001- atomic_set(&totSmBufAllocCount, 0);
43002+ atomic_set_unchecked(&totBufAllocCount, 0);
43003+ atomic_set_unchecked(&totSmBufAllocCount, 0);
43004 #endif /* CONFIG_CIFS_STATS2 */
43005
43006 atomic_set(&midCount, 0);
43007diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
43008index 73fea28..b996b84 100644
43009--- a/fs/cifs/cifsglob.h
43010+++ b/fs/cifs/cifsglob.h
43011@@ -439,28 +439,28 @@ struct cifs_tcon {
43012 __u16 Flags; /* optional support bits */
43013 enum statusEnum tidStatus;
43014 #ifdef CONFIG_CIFS_STATS
43015- atomic_t num_smbs_sent;
43016- atomic_t num_writes;
43017- atomic_t num_reads;
43018- atomic_t num_flushes;
43019- atomic_t num_oplock_brks;
43020- atomic_t num_opens;
43021- atomic_t num_closes;
43022- atomic_t num_deletes;
43023- atomic_t num_mkdirs;
43024- atomic_t num_posixopens;
43025- atomic_t num_posixmkdirs;
43026- atomic_t num_rmdirs;
43027- atomic_t num_renames;
43028- atomic_t num_t2renames;
43029- atomic_t num_ffirst;
43030- atomic_t num_fnext;
43031- atomic_t num_fclose;
43032- atomic_t num_hardlinks;
43033- atomic_t num_symlinks;
43034- atomic_t num_locks;
43035- atomic_t num_acl_get;
43036- atomic_t num_acl_set;
43037+ atomic_unchecked_t num_smbs_sent;
43038+ atomic_unchecked_t num_writes;
43039+ atomic_unchecked_t num_reads;
43040+ atomic_unchecked_t num_flushes;
43041+ atomic_unchecked_t num_oplock_brks;
43042+ atomic_unchecked_t num_opens;
43043+ atomic_unchecked_t num_closes;
43044+ atomic_unchecked_t num_deletes;
43045+ atomic_unchecked_t num_mkdirs;
43046+ atomic_unchecked_t num_posixopens;
43047+ atomic_unchecked_t num_posixmkdirs;
43048+ atomic_unchecked_t num_rmdirs;
43049+ atomic_unchecked_t num_renames;
43050+ atomic_unchecked_t num_t2renames;
43051+ atomic_unchecked_t num_ffirst;
43052+ atomic_unchecked_t num_fnext;
43053+ atomic_unchecked_t num_fclose;
43054+ atomic_unchecked_t num_hardlinks;
43055+ atomic_unchecked_t num_symlinks;
43056+ atomic_unchecked_t num_locks;
43057+ atomic_unchecked_t num_acl_get;
43058+ atomic_unchecked_t num_acl_set;
43059 #ifdef CONFIG_CIFS_STATS2
43060 unsigned long long time_writes;
43061 unsigned long long time_reads;
43062@@ -677,7 +677,7 @@ convert_delimiter(char *path, char delim)
43063 }
43064
43065 #ifdef CONFIG_CIFS_STATS
43066-#define cifs_stats_inc atomic_inc
43067+#define cifs_stats_inc atomic_inc_unchecked
43068
43069 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
43070 unsigned int bytes)
43071@@ -1036,8 +1036,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
43072 /* Various Debug counters */
43073 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
43074 #ifdef CONFIG_CIFS_STATS2
43075-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
43076-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
43077+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
43078+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
43079 #endif
43080 GLOBAL_EXTERN atomic_t smBufAllocCount;
43081 GLOBAL_EXTERN atomic_t midCount;
43082diff --git a/fs/cifs/link.c b/fs/cifs/link.c
43083index 6b0e064..94e6c3c 100644
43084--- a/fs/cifs/link.c
43085+++ b/fs/cifs/link.c
43086@@ -600,7 +600,7 @@ symlink_exit:
43087
43088 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
43089 {
43090- char *p = nd_get_link(nd);
43091+ const char *p = nd_get_link(nd);
43092 if (!IS_ERR(p))
43093 kfree(p);
43094 }
43095diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
43096index c29d1aa..58018da 100644
43097--- a/fs/cifs/misc.c
43098+++ b/fs/cifs/misc.c
43099@@ -156,7 +156,7 @@ cifs_buf_get(void)
43100 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
43101 atomic_inc(&bufAllocCount);
43102 #ifdef CONFIG_CIFS_STATS2
43103- atomic_inc(&totBufAllocCount);
43104+ atomic_inc_unchecked(&totBufAllocCount);
43105 #endif /* CONFIG_CIFS_STATS2 */
43106 }
43107
43108@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
43109 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
43110 atomic_inc(&smBufAllocCount);
43111 #ifdef CONFIG_CIFS_STATS2
43112- atomic_inc(&totSmBufAllocCount);
43113+ atomic_inc_unchecked(&totSmBufAllocCount);
43114 #endif /* CONFIG_CIFS_STATS2 */
43115
43116 }
43117diff --git a/fs/coda/cache.c b/fs/coda/cache.c
43118index 6901578..d402eb5 100644
43119--- a/fs/coda/cache.c
43120+++ b/fs/coda/cache.c
43121@@ -24,7 +24,7 @@
43122 #include "coda_linux.h"
43123 #include "coda_cache.h"
43124
43125-static atomic_t permission_epoch = ATOMIC_INIT(0);
43126+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
43127
43128 /* replace or extend an acl cache hit */
43129 void coda_cache_enter(struct inode *inode, int mask)
43130@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
43131 struct coda_inode_info *cii = ITOC(inode);
43132
43133 spin_lock(&cii->c_lock);
43134- cii->c_cached_epoch = atomic_read(&permission_epoch);
43135+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
43136 if (cii->c_uid != current_fsuid()) {
43137 cii->c_uid = current_fsuid();
43138 cii->c_cached_perm = mask;
43139@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
43140 {
43141 struct coda_inode_info *cii = ITOC(inode);
43142 spin_lock(&cii->c_lock);
43143- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
43144+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
43145 spin_unlock(&cii->c_lock);
43146 }
43147
43148 /* remove all acl caches */
43149 void coda_cache_clear_all(struct super_block *sb)
43150 {
43151- atomic_inc(&permission_epoch);
43152+ atomic_inc_unchecked(&permission_epoch);
43153 }
43154
43155
43156@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
43157 spin_lock(&cii->c_lock);
43158 hit = (mask & cii->c_cached_perm) == mask &&
43159 cii->c_uid == current_fsuid() &&
43160- cii->c_cached_epoch == atomic_read(&permission_epoch);
43161+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
43162 spin_unlock(&cii->c_lock);
43163
43164 return hit;
43165diff --git a/fs/compat.c b/fs/compat.c
43166index f2944ac..62845d2 100644
43167--- a/fs/compat.c
43168+++ b/fs/compat.c
43169@@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
43170
43171 set_fs(KERNEL_DS);
43172 /* The __user pointer cast is valid because of the set_fs() */
43173- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
43174+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
43175 set_fs(oldfs);
43176 /* truncating is ok because it's a user address */
43177 if (!ret)
43178@@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
43179 goto out;
43180
43181 ret = -EINVAL;
43182- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
43183+ if (nr_segs > UIO_MAXIOV)
43184 goto out;
43185 if (nr_segs > fast_segs) {
43186 ret = -ENOMEM;
43187@@ -831,6 +831,7 @@ struct compat_old_linux_dirent {
43188
43189 struct compat_readdir_callback {
43190 struct compat_old_linux_dirent __user *dirent;
43191+ struct file * file;
43192 int result;
43193 };
43194
43195@@ -848,6 +849,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
43196 buf->result = -EOVERFLOW;
43197 return -EOVERFLOW;
43198 }
43199+
43200+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43201+ return 0;
43202+
43203 buf->result++;
43204 dirent = buf->dirent;
43205 if (!access_ok(VERIFY_WRITE, dirent,
43206@@ -880,6 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
43207
43208 buf.result = 0;
43209 buf.dirent = dirent;
43210+ buf.file = file;
43211
43212 error = vfs_readdir(file, compat_fillonedir, &buf);
43213 if (buf.result)
43214@@ -900,6 +906,7 @@ struct compat_linux_dirent {
43215 struct compat_getdents_callback {
43216 struct compat_linux_dirent __user *current_dir;
43217 struct compat_linux_dirent __user *previous;
43218+ struct file * file;
43219 int count;
43220 int error;
43221 };
43222@@ -921,6 +928,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
43223 buf->error = -EOVERFLOW;
43224 return -EOVERFLOW;
43225 }
43226+
43227+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43228+ return 0;
43229+
43230 dirent = buf->previous;
43231 if (dirent) {
43232 if (__put_user(offset, &dirent->d_off))
43233@@ -968,6 +979,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
43234 buf.previous = NULL;
43235 buf.count = count;
43236 buf.error = 0;
43237+ buf.file = file;
43238
43239 error = vfs_readdir(file, compat_filldir, &buf);
43240 if (error >= 0)
43241@@ -989,6 +1001,7 @@ out:
43242 struct compat_getdents_callback64 {
43243 struct linux_dirent64 __user *current_dir;
43244 struct linux_dirent64 __user *previous;
43245+ struct file * file;
43246 int count;
43247 int error;
43248 };
43249@@ -1005,6 +1018,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
43250 buf->error = -EINVAL; /* only used if we fail.. */
43251 if (reclen > buf->count)
43252 return -EINVAL;
43253+
43254+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43255+ return 0;
43256+
43257 dirent = buf->previous;
43258
43259 if (dirent) {
43260@@ -1056,13 +1073,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
43261 buf.previous = NULL;
43262 buf.count = count;
43263 buf.error = 0;
43264+ buf.file = file;
43265
43266 error = vfs_readdir(file, compat_filldir64, &buf);
43267 if (error >= 0)
43268 error = buf.error;
43269 lastdirent = buf.previous;
43270 if (lastdirent) {
43271- typeof(lastdirent->d_off) d_off = file->f_pos;
43272+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
43273 if (__put_user_unaligned(d_off, &lastdirent->d_off))
43274 error = -EFAULT;
43275 else
43276diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
43277index 112e45a..b59845b 100644
43278--- a/fs/compat_binfmt_elf.c
43279+++ b/fs/compat_binfmt_elf.c
43280@@ -30,11 +30,13 @@
43281 #undef elf_phdr
43282 #undef elf_shdr
43283 #undef elf_note
43284+#undef elf_dyn
43285 #undef elf_addr_t
43286 #define elfhdr elf32_hdr
43287 #define elf_phdr elf32_phdr
43288 #define elf_shdr elf32_shdr
43289 #define elf_note elf32_note
43290+#define elf_dyn Elf32_Dyn
43291 #define elf_addr_t Elf32_Addr
43292
43293 /*
43294diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
43295index debdfe0..75d31d4 100644
43296--- a/fs/compat_ioctl.c
43297+++ b/fs/compat_ioctl.c
43298@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
43299
43300 err = get_user(palp, &up->palette);
43301 err |= get_user(length, &up->length);
43302+ if (err)
43303+ return -EFAULT;
43304
43305 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
43306 err = put_user(compat_ptr(palp), &up_native->palette);
43307@@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
43308 return -EFAULT;
43309 if (__get_user(udata, &ss32->iomem_base))
43310 return -EFAULT;
43311- ss.iomem_base = compat_ptr(udata);
43312+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
43313 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
43314 __get_user(ss.port_high, &ss32->port_high))
43315 return -EFAULT;
43316@@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
43317 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
43318 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
43319 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
43320- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43321+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43322 return -EFAULT;
43323
43324 return ioctl_preallocate(file, p);
43325@@ -1610,8 +1612,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
43326 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
43327 {
43328 unsigned int a, b;
43329- a = *(unsigned int *)p;
43330- b = *(unsigned int *)q;
43331+ a = *(const unsigned int *)p;
43332+ b = *(const unsigned int *)q;
43333 if (a > b)
43334 return 1;
43335 if (a < b)
43336diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
43337index 7e6c52d..94bc756 100644
43338--- a/fs/configfs/dir.c
43339+++ b/fs/configfs/dir.c
43340@@ -1564,7 +1564,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43341 }
43342 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
43343 struct configfs_dirent *next;
43344- const char * name;
43345+ const unsigned char * name;
43346+ char d_name[sizeof(next->s_dentry->d_iname)];
43347 int len;
43348 struct inode *inode = NULL;
43349
43350@@ -1574,7 +1575,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43351 continue;
43352
43353 name = configfs_get_name(next);
43354- len = strlen(name);
43355+ if (next->s_dentry && name == next->s_dentry->d_iname) {
43356+ len = next->s_dentry->d_name.len;
43357+ memcpy(d_name, name, len);
43358+ name = d_name;
43359+ } else
43360+ len = strlen(name);
43361
43362 /*
43363 * We'll have a dentry and an inode for
43364diff --git a/fs/dcache.c b/fs/dcache.c
43365index b80531c..8ca7e2d 100644
43366--- a/fs/dcache.c
43367+++ b/fs/dcache.c
43368@@ -3084,7 +3084,7 @@ void __init vfs_caches_init(unsigned long mempages)
43369 mempages -= reserve;
43370
43371 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
43372- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
43373+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
43374
43375 dcache_init();
43376 inode_init();
43377diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
43378index b80bc84..0d46d1a 100644
43379--- a/fs/debugfs/inode.c
43380+++ b/fs/debugfs/inode.c
43381@@ -408,7 +408,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
43382 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
43383 {
43384 return debugfs_create_file(name,
43385+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43386+ S_IFDIR | S_IRWXU,
43387+#else
43388 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43389+#endif
43390 parent, NULL, NULL);
43391 }
43392 EXPORT_SYMBOL_GPL(debugfs_create_dir);
43393diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
43394index ab35b11..b30af66 100644
43395--- a/fs/ecryptfs/inode.c
43396+++ b/fs/ecryptfs/inode.c
43397@@ -672,7 +672,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
43398 old_fs = get_fs();
43399 set_fs(get_ds());
43400 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
43401- (char __user *)lower_buf,
43402+ (char __force_user *)lower_buf,
43403 lower_bufsiz);
43404 set_fs(old_fs);
43405 if (rc < 0)
43406@@ -718,7 +718,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
43407 }
43408 old_fs = get_fs();
43409 set_fs(get_ds());
43410- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
43411+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
43412 set_fs(old_fs);
43413 if (rc < 0) {
43414 kfree(buf);
43415@@ -733,7 +733,7 @@ out:
43416 static void
43417 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
43418 {
43419- char *buf = nd_get_link(nd);
43420+ const char *buf = nd_get_link(nd);
43421 if (!IS_ERR(buf)) {
43422 /* Free the char* */
43423 kfree(buf);
43424diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
43425index 3a06f40..f7af544 100644
43426--- a/fs/ecryptfs/miscdev.c
43427+++ b/fs/ecryptfs/miscdev.c
43428@@ -345,7 +345,7 @@ check_list:
43429 goto out_unlock_msg_ctx;
43430 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
43431 if (msg_ctx->msg) {
43432- if (copy_to_user(&buf[i], packet_length, packet_length_size))
43433+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
43434 goto out_unlock_msg_ctx;
43435 i += packet_length_size;
43436 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
43437diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
43438index b2a34a1..162fa69 100644
43439--- a/fs/ecryptfs/read_write.c
43440+++ b/fs/ecryptfs/read_write.c
43441@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
43442 return -EIO;
43443 fs_save = get_fs();
43444 set_fs(get_ds());
43445- rc = vfs_write(lower_file, data, size, &offset);
43446+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
43447 set_fs(fs_save);
43448 mark_inode_dirty_sync(ecryptfs_inode);
43449 return rc;
43450@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
43451 return -EIO;
43452 fs_save = get_fs();
43453 set_fs(get_ds());
43454- rc = vfs_read(lower_file, data, size, &offset);
43455+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
43456 set_fs(fs_save);
43457 return rc;
43458 }
43459diff --git a/fs/exec.c b/fs/exec.c
43460index b1fd202..582240d 100644
43461--- a/fs/exec.c
43462+++ b/fs/exec.c
43463@@ -55,6 +55,15 @@
43464 #include <linux/pipe_fs_i.h>
43465 #include <linux/oom.h>
43466 #include <linux/compat.h>
43467+#include <linux/random.h>
43468+#include <linux/seq_file.h>
43469+
43470+#ifdef CONFIG_PAX_REFCOUNT
43471+#include <linux/kallsyms.h>
43472+#include <linux/kdebug.h>
43473+#endif
43474+
43475+#include <trace/events/fs.h>
43476
43477 #include <asm/uaccess.h>
43478 #include <asm/mmu_context.h>
43479@@ -66,6 +75,18 @@
43480
43481 #include <trace/events/sched.h>
43482
43483+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
43484+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
43485+{
43486+ WARN_ONCE(1, "PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
43487+}
43488+#endif
43489+
43490+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
43491+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
43492+EXPORT_SYMBOL(pax_set_initial_flags_func);
43493+#endif
43494+
43495 int core_uses_pid;
43496 char core_pattern[CORENAME_MAX_SIZE] = "core";
43497 unsigned int core_pipe_limit;
43498@@ -75,7 +96,7 @@ struct core_name {
43499 char *corename;
43500 int used, size;
43501 };
43502-static atomic_t call_count = ATOMIC_INIT(1);
43503+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
43504
43505 /* The maximal length of core_pattern is also specified in sysctl.c */
43506
43507@@ -191,18 +212,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43508 int write)
43509 {
43510 struct page *page;
43511- int ret;
43512
43513-#ifdef CONFIG_STACK_GROWSUP
43514- if (write) {
43515- ret = expand_downwards(bprm->vma, pos);
43516- if (ret < 0)
43517- return NULL;
43518- }
43519-#endif
43520- ret = get_user_pages(current, bprm->mm, pos,
43521- 1, write, 1, &page, NULL);
43522- if (ret <= 0)
43523+ if (0 > expand_downwards(bprm->vma, pos))
43524+ return NULL;
43525+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
43526 return NULL;
43527
43528 if (write) {
43529@@ -218,6 +231,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43530 if (size <= ARG_MAX)
43531 return page;
43532
43533+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43534+ // only allow 512KB for argv+env on suid/sgid binaries
43535+ // to prevent easy ASLR exhaustion
43536+ if (((bprm->cred->euid != current_euid()) ||
43537+ (bprm->cred->egid != current_egid())) &&
43538+ (size > (512 * 1024))) {
43539+ put_page(page);
43540+ return NULL;
43541+ }
43542+#endif
43543+
43544 /*
43545 * Limit to 1/4-th the stack size for the argv+env strings.
43546 * This ensures that:
43547@@ -277,6 +301,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43548 vma->vm_end = STACK_TOP_MAX;
43549 vma->vm_start = vma->vm_end - PAGE_SIZE;
43550 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
43551+
43552+#ifdef CONFIG_PAX_SEGMEXEC
43553+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
43554+#endif
43555+
43556 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
43557 INIT_LIST_HEAD(&vma->anon_vma_chain);
43558
43559@@ -291,6 +320,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43560 mm->stack_vm = mm->total_vm = 1;
43561 up_write(&mm->mmap_sem);
43562 bprm->p = vma->vm_end - sizeof(void *);
43563+
43564+#ifdef CONFIG_PAX_RANDUSTACK
43565+ if (randomize_va_space)
43566+ bprm->p ^= random32() & ~PAGE_MASK;
43567+#endif
43568+
43569 return 0;
43570 err:
43571 up_write(&mm->mmap_sem);
43572@@ -399,19 +434,7 @@ err:
43573 return err;
43574 }
43575
43576-struct user_arg_ptr {
43577-#ifdef CONFIG_COMPAT
43578- bool is_compat;
43579-#endif
43580- union {
43581- const char __user *const __user *native;
43582-#ifdef CONFIG_COMPAT
43583- compat_uptr_t __user *compat;
43584-#endif
43585- } ptr;
43586-};
43587-
43588-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43589+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43590 {
43591 const char __user *native;
43592
43593@@ -420,14 +443,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43594 compat_uptr_t compat;
43595
43596 if (get_user(compat, argv.ptr.compat + nr))
43597- return ERR_PTR(-EFAULT);
43598+ return (const char __force_user *)ERR_PTR(-EFAULT);
43599
43600 return compat_ptr(compat);
43601 }
43602 #endif
43603
43604 if (get_user(native, argv.ptr.native + nr))
43605- return ERR_PTR(-EFAULT);
43606+ return (const char __force_user *)ERR_PTR(-EFAULT);
43607
43608 return native;
43609 }
43610@@ -446,7 +469,7 @@ static int count(struct user_arg_ptr argv, int max)
43611 if (!p)
43612 break;
43613
43614- if (IS_ERR(p))
43615+ if (IS_ERR((const char __force_kernel *)p))
43616 return -EFAULT;
43617
43618 if (i++ >= max)
43619@@ -480,7 +503,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
43620
43621 ret = -EFAULT;
43622 str = get_user_arg_ptr(argv, argc);
43623- if (IS_ERR(str))
43624+ if (IS_ERR((const char __force_kernel *)str))
43625 goto out;
43626
43627 len = strnlen_user(str, MAX_ARG_STRLEN);
43628@@ -562,7 +585,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
43629 int r;
43630 mm_segment_t oldfs = get_fs();
43631 struct user_arg_ptr argv = {
43632- .ptr.native = (const char __user *const __user *)__argv,
43633+ .ptr.native = (const char __force_user *const __force_user *)__argv,
43634 };
43635
43636 set_fs(KERNEL_DS);
43637@@ -597,7 +620,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43638 unsigned long new_end = old_end - shift;
43639 struct mmu_gather tlb;
43640
43641- BUG_ON(new_start > new_end);
43642+ if (new_start >= new_end || new_start < mmap_min_addr)
43643+ return -ENOMEM;
43644
43645 /*
43646 * ensure there are no vmas between where we want to go
43647@@ -606,6 +630,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43648 if (vma != find_vma(mm, new_start))
43649 return -EFAULT;
43650
43651+#ifdef CONFIG_PAX_SEGMEXEC
43652+ BUG_ON(pax_find_mirror_vma(vma));
43653+#endif
43654+
43655 /*
43656 * cover the whole range: [new_start, old_end)
43657 */
43658@@ -686,10 +714,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43659 stack_top = arch_align_stack(stack_top);
43660 stack_top = PAGE_ALIGN(stack_top);
43661
43662- if (unlikely(stack_top < mmap_min_addr) ||
43663- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
43664- return -ENOMEM;
43665-
43666 stack_shift = vma->vm_end - stack_top;
43667
43668 bprm->p -= stack_shift;
43669@@ -701,8 +725,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
43670 bprm->exec -= stack_shift;
43671
43672 down_write(&mm->mmap_sem);
43673+
43674+ /* Move stack pages down in memory. */
43675+ if (stack_shift) {
43676+ ret = shift_arg_pages(vma, stack_shift);
43677+ if (ret)
43678+ goto out_unlock;
43679+ }
43680+
43681 vm_flags = VM_STACK_FLAGS;
43682
43683+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43684+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43685+ vm_flags &= ~VM_EXEC;
43686+
43687+#ifdef CONFIG_PAX_MPROTECT
43688+ if (mm->pax_flags & MF_PAX_MPROTECT)
43689+ vm_flags &= ~VM_MAYEXEC;
43690+#endif
43691+
43692+ }
43693+#endif
43694+
43695 /*
43696 * Adjust stack execute permissions; explicitly enable for
43697 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
43698@@ -721,13 +765,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43699 goto out_unlock;
43700 BUG_ON(prev != vma);
43701
43702- /* Move stack pages down in memory. */
43703- if (stack_shift) {
43704- ret = shift_arg_pages(vma, stack_shift);
43705- if (ret)
43706- goto out_unlock;
43707- }
43708-
43709 /* mprotect_fixup is overkill to remove the temporary stack flags */
43710 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
43711
43712@@ -785,6 +822,8 @@ struct file *open_exec(const char *name)
43713
43714 fsnotify_open(file);
43715
43716+ trace_open_exec(name);
43717+
43718 err = deny_write_access(file);
43719 if (err)
43720 goto exit;
43721@@ -808,7 +847,7 @@ int kernel_read(struct file *file, loff_t offset,
43722 old_fs = get_fs();
43723 set_fs(get_ds());
43724 /* The cast to a user pointer is valid due to the set_fs() */
43725- result = vfs_read(file, (void __user *)addr, count, &pos);
43726+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
43727 set_fs(old_fs);
43728 return result;
43729 }
43730@@ -1254,7 +1293,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
43731 }
43732 rcu_read_unlock();
43733
43734- if (p->fs->users > n_fs) {
43735+ if (atomic_read(&p->fs->users) > n_fs) {
43736 bprm->unsafe |= LSM_UNSAFE_SHARE;
43737 } else {
43738 res = -EAGAIN;
43739@@ -1451,6 +1490,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
43740
43741 EXPORT_SYMBOL(search_binary_handler);
43742
43743+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43744+static DEFINE_PER_CPU(u64, exec_counter);
43745+static int __init init_exec_counters(void)
43746+{
43747+ unsigned int cpu;
43748+
43749+ for_each_possible_cpu(cpu) {
43750+ per_cpu(exec_counter, cpu) = (u64)cpu;
43751+ }
43752+
43753+ return 0;
43754+}
43755+early_initcall(init_exec_counters);
43756+static inline void increment_exec_counter(void)
43757+{
43758+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
43759+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
43760+}
43761+#else
43762+static inline void increment_exec_counter(void) {}
43763+#endif
43764+
43765 /*
43766 * sys_execve() executes a new program.
43767 */
43768@@ -1459,6 +1520,11 @@ static int do_execve_common(const char *filename,
43769 struct user_arg_ptr envp,
43770 struct pt_regs *regs)
43771 {
43772+#ifdef CONFIG_GRKERNSEC
43773+ struct file *old_exec_file;
43774+ struct acl_subject_label *old_acl;
43775+ struct rlimit old_rlim[RLIM_NLIMITS];
43776+#endif
43777 struct linux_binprm *bprm;
43778 struct file *file;
43779 struct files_struct *displaced;
43780@@ -1466,6 +1532,8 @@ static int do_execve_common(const char *filename,
43781 int retval;
43782 const struct cred *cred = current_cred();
43783
43784+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
43785+
43786 /*
43787 * We move the actual failure in case of RLIMIT_NPROC excess from
43788 * set*uid() to execve() because too many poorly written programs
43789@@ -1506,12 +1574,27 @@ static int do_execve_common(const char *filename,
43790 if (IS_ERR(file))
43791 goto out_unmark;
43792
43793+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
43794+ retval = -EPERM;
43795+ goto out_file;
43796+ }
43797+
43798 sched_exec();
43799
43800 bprm->file = file;
43801 bprm->filename = filename;
43802 bprm->interp = filename;
43803
43804+ if (gr_process_user_ban()) {
43805+ retval = -EPERM;
43806+ goto out_file;
43807+ }
43808+
43809+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
43810+ retval = -EACCES;
43811+ goto out_file;
43812+ }
43813+
43814 retval = bprm_mm_init(bprm);
43815 if (retval)
43816 goto out_file;
43817@@ -1528,24 +1611,65 @@ static int do_execve_common(const char *filename,
43818 if (retval < 0)
43819 goto out;
43820
43821+#ifdef CONFIG_GRKERNSEC
43822+ old_acl = current->acl;
43823+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
43824+ old_exec_file = current->exec_file;
43825+ get_file(file);
43826+ current->exec_file = file;
43827+#endif
43828+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43829+ /* limit suid stack to 8MB
43830+ we saved the old limits above and will restore them if this exec fails
43831+ */
43832+ if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
43833+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
43834+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
43835+#endif
43836+
43837+ if (!gr_tpe_allow(file)) {
43838+ retval = -EACCES;
43839+ goto out_fail;
43840+ }
43841+
43842+ if (gr_check_crash_exec(file)) {
43843+ retval = -EACCES;
43844+ goto out_fail;
43845+ }
43846+
43847+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
43848+ bprm->unsafe);
43849+ if (retval < 0)
43850+ goto out_fail;
43851+
43852 retval = copy_strings_kernel(1, &bprm->filename, bprm);
43853 if (retval < 0)
43854- goto out;
43855+ goto out_fail;
43856
43857 bprm->exec = bprm->p;
43858 retval = copy_strings(bprm->envc, envp, bprm);
43859 if (retval < 0)
43860- goto out;
43861+ goto out_fail;
43862
43863 retval = copy_strings(bprm->argc, argv, bprm);
43864 if (retval < 0)
43865- goto out;
43866+ goto out_fail;
43867+
43868+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
43869+
43870+ gr_handle_exec_args(bprm, argv);
43871
43872 retval = search_binary_handler(bprm,regs);
43873 if (retval < 0)
43874- goto out;
43875+ goto out_fail;
43876+#ifdef CONFIG_GRKERNSEC
43877+ if (old_exec_file)
43878+ fput(old_exec_file);
43879+#endif
43880
43881 /* execve succeeded */
43882+
43883+ increment_exec_counter();
43884 current->fs->in_exec = 0;
43885 current->in_execve = 0;
43886 acct_update_integrals(current);
43887@@ -1554,6 +1678,14 @@ static int do_execve_common(const char *filename,
43888 put_files_struct(displaced);
43889 return retval;
43890
43891+out_fail:
43892+#ifdef CONFIG_GRKERNSEC
43893+ current->acl = old_acl;
43894+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
43895+ fput(current->exec_file);
43896+ current->exec_file = old_exec_file;
43897+#endif
43898+
43899 out:
43900 if (bprm->mm) {
43901 acct_arg_size(bprm, 0);
43902@@ -1627,7 +1759,7 @@ static int expand_corename(struct core_name *cn)
43903 {
43904 char *old_corename = cn->corename;
43905
43906- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
43907+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
43908 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
43909
43910 if (!cn->corename) {
43911@@ -1724,7 +1856,7 @@ static int format_corename(struct core_name *cn, long signr)
43912 int pid_in_pattern = 0;
43913 int err = 0;
43914
43915- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
43916+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
43917 cn->corename = kmalloc(cn->size, GFP_KERNEL);
43918 cn->used = 0;
43919
43920@@ -1821,6 +1953,228 @@ out:
43921 return ispipe;
43922 }
43923
43924+int pax_check_flags(unsigned long *flags)
43925+{
43926+ int retval = 0;
43927+
43928+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
43929+ if (*flags & MF_PAX_SEGMEXEC)
43930+ {
43931+ *flags &= ~MF_PAX_SEGMEXEC;
43932+ retval = -EINVAL;
43933+ }
43934+#endif
43935+
43936+ if ((*flags & MF_PAX_PAGEEXEC)
43937+
43938+#ifdef CONFIG_PAX_PAGEEXEC
43939+ && (*flags & MF_PAX_SEGMEXEC)
43940+#endif
43941+
43942+ )
43943+ {
43944+ *flags &= ~MF_PAX_PAGEEXEC;
43945+ retval = -EINVAL;
43946+ }
43947+
43948+ if ((*flags & MF_PAX_MPROTECT)
43949+
43950+#ifdef CONFIG_PAX_MPROTECT
43951+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43952+#endif
43953+
43954+ )
43955+ {
43956+ *flags &= ~MF_PAX_MPROTECT;
43957+ retval = -EINVAL;
43958+ }
43959+
43960+ if ((*flags & MF_PAX_EMUTRAMP)
43961+
43962+#ifdef CONFIG_PAX_EMUTRAMP
43963+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43964+#endif
43965+
43966+ )
43967+ {
43968+ *flags &= ~MF_PAX_EMUTRAMP;
43969+ retval = -EINVAL;
43970+ }
43971+
43972+ return retval;
43973+}
43974+
43975+EXPORT_SYMBOL(pax_check_flags);
43976+
43977+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43978+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
43979+{
43980+ struct task_struct *tsk = current;
43981+ struct mm_struct *mm = current->mm;
43982+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
43983+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
43984+ char *path_exec = NULL;
43985+ char *path_fault = NULL;
43986+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
43987+
43988+ if (buffer_exec && buffer_fault) {
43989+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
43990+
43991+ down_read(&mm->mmap_sem);
43992+ vma = mm->mmap;
43993+ while (vma && (!vma_exec || !vma_fault)) {
43994+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
43995+ vma_exec = vma;
43996+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
43997+ vma_fault = vma;
43998+ vma = vma->vm_next;
43999+ }
44000+ if (vma_exec) {
44001+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
44002+ if (IS_ERR(path_exec))
44003+ path_exec = "<path too long>";
44004+ else {
44005+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
44006+ if (path_exec) {
44007+ *path_exec = 0;
44008+ path_exec = buffer_exec;
44009+ } else
44010+ path_exec = "<path too long>";
44011+ }
44012+ }
44013+ if (vma_fault) {
44014+ start = vma_fault->vm_start;
44015+ end = vma_fault->vm_end;
44016+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
44017+ if (vma_fault->vm_file) {
44018+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
44019+ if (IS_ERR(path_fault))
44020+ path_fault = "<path too long>";
44021+ else {
44022+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
44023+ if (path_fault) {
44024+ *path_fault = 0;
44025+ path_fault = buffer_fault;
44026+ } else
44027+ path_fault = "<path too long>";
44028+ }
44029+ } else
44030+ path_fault = "<anonymous mapping>";
44031+ }
44032+ up_read(&mm->mmap_sem);
44033+ }
44034+ if (tsk->signal->curr_ip)
44035+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
44036+ else
44037+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
44038+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
44039+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
44040+ task_uid(tsk), task_euid(tsk), pc, sp);
44041+ free_page((unsigned long)buffer_exec);
44042+ free_page((unsigned long)buffer_fault);
44043+ pax_report_insns(regs, pc, sp);
44044+ do_coredump(SIGKILL, SIGKILL, regs);
44045+}
44046+#endif
44047+
44048+#ifdef CONFIG_PAX_REFCOUNT
44049+void pax_report_refcount_overflow(struct pt_regs *regs)
44050+{
44051+ if (current->signal->curr_ip)
44052+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44053+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
44054+ else
44055+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44056+ current->comm, task_pid_nr(current), current_uid(), current_euid());
44057+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
44058+ show_regs(regs);
44059+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
44060+}
44061+#endif
44062+
44063+#ifdef CONFIG_PAX_USERCOPY
44064+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
44065+int object_is_on_stack(const void *obj, unsigned long len)
44066+{
44067+ const void * const stack = task_stack_page(current);
44068+ const void * const stackend = stack + THREAD_SIZE;
44069+
44070+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44071+ const void *frame = NULL;
44072+ const void *oldframe;
44073+#endif
44074+
44075+ if (obj + len < obj)
44076+ return -1;
44077+
44078+ if (obj + len <= stack || stackend <= obj)
44079+ return 0;
44080+
44081+ if (obj < stack || stackend < obj + len)
44082+ return -1;
44083+
44084+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44085+ oldframe = __builtin_frame_address(1);
44086+ if (oldframe)
44087+ frame = __builtin_frame_address(2);
44088+ /*
44089+ low ----------------------------------------------> high
44090+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
44091+ ^----------------^
44092+ allow copies only within here
44093+ */
44094+ while (stack <= frame && frame < stackend) {
44095+ /* if obj + len extends past the last frame, this
44096+ check won't pass and the next frame will be 0,
44097+ causing us to bail out and correctly report
44098+ the copy as invalid
44099+ */
44100+ if (obj + len <= frame)
44101+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
44102+ oldframe = frame;
44103+ frame = *(const void * const *)frame;
44104+ }
44105+ return -1;
44106+#else
44107+ return 1;
44108+#endif
44109+}
44110+
44111+__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
44112+{
44113+ if (current->signal->curr_ip)
44114+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44115+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44116+ else
44117+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44118+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44119+ dump_stack();
44120+ gr_handle_kernel_exploit();
44121+ do_group_exit(SIGKILL);
44122+}
44123+#endif
44124+
44125+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
44126+void pax_track_stack(void)
44127+{
44128+ unsigned long sp = (unsigned long)&sp;
44129+ if (sp < current_thread_info()->lowest_stack &&
44130+ sp > (unsigned long)task_stack_page(current))
44131+ current_thread_info()->lowest_stack = sp;
44132+}
44133+EXPORT_SYMBOL(pax_track_stack);
44134+#endif
44135+
44136+#ifdef CONFIG_PAX_SIZE_OVERFLOW
44137+void report_size_overflow(const char *file, unsigned int line, const char *func)
44138+{
44139+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line);
44140+ dump_stack();
44141+ do_group_exit(SIGKILL);
44142+}
44143+EXPORT_SYMBOL(report_size_overflow);
44144+#endif
44145+
44146 static int zap_process(struct task_struct *start, int exit_code)
44147 {
44148 struct task_struct *t;
44149@@ -2018,17 +2372,17 @@ static void wait_for_dump_helpers(struct file *file)
44150 pipe = file->f_path.dentry->d_inode->i_pipe;
44151
44152 pipe_lock(pipe);
44153- pipe->readers++;
44154- pipe->writers--;
44155+ atomic_inc(&pipe->readers);
44156+ atomic_dec(&pipe->writers);
44157
44158- while ((pipe->readers > 1) && (!signal_pending(current))) {
44159+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
44160 wake_up_interruptible_sync(&pipe->wait);
44161 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44162 pipe_wait(pipe);
44163 }
44164
44165- pipe->readers--;
44166- pipe->writers++;
44167+ atomic_dec(&pipe->readers);
44168+ atomic_inc(&pipe->writers);
44169 pipe_unlock(pipe);
44170
44171 }
44172@@ -2089,7 +2443,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44173 int retval = 0;
44174 int flag = 0;
44175 int ispipe;
44176- static atomic_t core_dump_count = ATOMIC_INIT(0);
44177+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
44178 struct coredump_params cprm = {
44179 .signr = signr,
44180 .regs = regs,
44181@@ -2104,6 +2458,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44182
44183 audit_core_dumps(signr);
44184
44185+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
44186+ gr_handle_brute_attach(current, cprm.mm_flags);
44187+
44188 binfmt = mm->binfmt;
44189 if (!binfmt || !binfmt->core_dump)
44190 goto fail;
44191@@ -2171,7 +2528,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44192 }
44193 cprm.limit = RLIM_INFINITY;
44194
44195- dump_count = atomic_inc_return(&core_dump_count);
44196+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
44197 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
44198 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
44199 task_tgid_vnr(current), current->comm);
44200@@ -2198,6 +2555,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44201 } else {
44202 struct inode *inode;
44203
44204+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
44205+
44206 if (cprm.limit < binfmt->min_coredump)
44207 goto fail_unlock;
44208
44209@@ -2241,7 +2600,7 @@ close_fail:
44210 filp_close(cprm.file, NULL);
44211 fail_dropcount:
44212 if (ispipe)
44213- atomic_dec(&core_dump_count);
44214+ atomic_dec_unchecked(&core_dump_count);
44215 fail_unlock:
44216 kfree(cn.corename);
44217 fail_corename:
44218@@ -2260,7 +2619,7 @@ fail:
44219 */
44220 int dump_write(struct file *file, const void *addr, int nr)
44221 {
44222- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
44223+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
44224 }
44225 EXPORT_SYMBOL(dump_write);
44226
44227diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
44228index a8cbe1b..fed04cb 100644
44229--- a/fs/ext2/balloc.c
44230+++ b/fs/ext2/balloc.c
44231@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
44232
44233 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44234 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44235- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44236+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44237 sbi->s_resuid != current_fsuid() &&
44238 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44239 return 0;
44240diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
44241index baac1b1..1499b62 100644
44242--- a/fs/ext3/balloc.c
44243+++ b/fs/ext3/balloc.c
44244@@ -1438,9 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
44245
44246 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44247 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44248- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44249+ if (free_blocks < root_blocks + 1 &&
44250 !use_reservation && sbi->s_resuid != current_fsuid() &&
44251- (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44252+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
44253+ !capable_nolog(CAP_SYS_RESOURCE)) {
44254 return 0;
44255 }
44256 return 1;
44257diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
44258index 8da837b..ed3835b 100644
44259--- a/fs/ext4/balloc.c
44260+++ b/fs/ext4/balloc.c
44261@@ -463,8 +463,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
44262 /* Hm, nope. Are (enough) root reserved clusters available? */
44263 if (sbi->s_resuid == current_fsuid() ||
44264 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
44265- capable(CAP_SYS_RESOURCE) ||
44266- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
44267+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
44268+ capable_nolog(CAP_SYS_RESOURCE)) {
44269
44270 if (free_clusters >= (nclusters + dirty_clusters))
44271 return 1;
44272diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
44273index 0e01e90..ae2bd5e 100644
44274--- a/fs/ext4/ext4.h
44275+++ b/fs/ext4/ext4.h
44276@@ -1225,19 +1225,19 @@ struct ext4_sb_info {
44277 unsigned long s_mb_last_start;
44278
44279 /* stats for buddy allocator */
44280- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
44281- atomic_t s_bal_success; /* we found long enough chunks */
44282- atomic_t s_bal_allocated; /* in blocks */
44283- atomic_t s_bal_ex_scanned; /* total extents scanned */
44284- atomic_t s_bal_goals; /* goal hits */
44285- atomic_t s_bal_breaks; /* too long searches */
44286- atomic_t s_bal_2orders; /* 2^order hits */
44287+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
44288+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
44289+ atomic_unchecked_t s_bal_allocated; /* in blocks */
44290+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
44291+ atomic_unchecked_t s_bal_goals; /* goal hits */
44292+ atomic_unchecked_t s_bal_breaks; /* too long searches */
44293+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
44294 spinlock_t s_bal_lock;
44295 unsigned long s_mb_buddies_generated;
44296 unsigned long long s_mb_generation_time;
44297- atomic_t s_mb_lost_chunks;
44298- atomic_t s_mb_preallocated;
44299- atomic_t s_mb_discarded;
44300+ atomic_unchecked_t s_mb_lost_chunks;
44301+ atomic_unchecked_t s_mb_preallocated;
44302+ atomic_unchecked_t s_mb_discarded;
44303 atomic_t s_lock_busy;
44304
44305 /* locality groups */
44306diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
44307index 6b0a57e..1955a44 100644
44308--- a/fs/ext4/mballoc.c
44309+++ b/fs/ext4/mballoc.c
44310@@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
44311 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
44312
44313 if (EXT4_SB(sb)->s_mb_stats)
44314- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
44315+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
44316
44317 break;
44318 }
44319@@ -2041,7 +2041,7 @@ repeat:
44320 ac->ac_status = AC_STATUS_CONTINUE;
44321 ac->ac_flags |= EXT4_MB_HINT_FIRST;
44322 cr = 3;
44323- atomic_inc(&sbi->s_mb_lost_chunks);
44324+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
44325 goto repeat;
44326 }
44327 }
44328@@ -2545,25 +2545,25 @@ int ext4_mb_release(struct super_block *sb)
44329 if (sbi->s_mb_stats) {
44330 ext4_msg(sb, KERN_INFO,
44331 "mballoc: %u blocks %u reqs (%u success)",
44332- atomic_read(&sbi->s_bal_allocated),
44333- atomic_read(&sbi->s_bal_reqs),
44334- atomic_read(&sbi->s_bal_success));
44335+ atomic_read_unchecked(&sbi->s_bal_allocated),
44336+ atomic_read_unchecked(&sbi->s_bal_reqs),
44337+ atomic_read_unchecked(&sbi->s_bal_success));
44338 ext4_msg(sb, KERN_INFO,
44339 "mballoc: %u extents scanned, %u goal hits, "
44340 "%u 2^N hits, %u breaks, %u lost",
44341- atomic_read(&sbi->s_bal_ex_scanned),
44342- atomic_read(&sbi->s_bal_goals),
44343- atomic_read(&sbi->s_bal_2orders),
44344- atomic_read(&sbi->s_bal_breaks),
44345- atomic_read(&sbi->s_mb_lost_chunks));
44346+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
44347+ atomic_read_unchecked(&sbi->s_bal_goals),
44348+ atomic_read_unchecked(&sbi->s_bal_2orders),
44349+ atomic_read_unchecked(&sbi->s_bal_breaks),
44350+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
44351 ext4_msg(sb, KERN_INFO,
44352 "mballoc: %lu generated and it took %Lu",
44353 sbi->s_mb_buddies_generated,
44354 sbi->s_mb_generation_time);
44355 ext4_msg(sb, KERN_INFO,
44356 "mballoc: %u preallocated, %u discarded",
44357- atomic_read(&sbi->s_mb_preallocated),
44358- atomic_read(&sbi->s_mb_discarded));
44359+ atomic_read_unchecked(&sbi->s_mb_preallocated),
44360+ atomic_read_unchecked(&sbi->s_mb_discarded));
44361 }
44362
44363 free_percpu(sbi->s_locality_groups);
44364@@ -3045,16 +3045,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
44365 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
44366
44367 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
44368- atomic_inc(&sbi->s_bal_reqs);
44369- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44370+ atomic_inc_unchecked(&sbi->s_bal_reqs);
44371+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44372 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
44373- atomic_inc(&sbi->s_bal_success);
44374- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
44375+ atomic_inc_unchecked(&sbi->s_bal_success);
44376+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
44377 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
44378 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
44379- atomic_inc(&sbi->s_bal_goals);
44380+ atomic_inc_unchecked(&sbi->s_bal_goals);
44381 if (ac->ac_found > sbi->s_mb_max_to_scan)
44382- atomic_inc(&sbi->s_bal_breaks);
44383+ atomic_inc_unchecked(&sbi->s_bal_breaks);
44384 }
44385
44386 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
44387@@ -3458,7 +3458,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
44388 trace_ext4_mb_new_inode_pa(ac, pa);
44389
44390 ext4_mb_use_inode_pa(ac, pa);
44391- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
44392+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
44393
44394 ei = EXT4_I(ac->ac_inode);
44395 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44396@@ -3518,7 +3518,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
44397 trace_ext4_mb_new_group_pa(ac, pa);
44398
44399 ext4_mb_use_group_pa(ac, pa);
44400- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44401+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44402
44403 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44404 lg = ac->ac_lg;
44405@@ -3607,7 +3607,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
44406 * from the bitmap and continue.
44407 */
44408 }
44409- atomic_add(free, &sbi->s_mb_discarded);
44410+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
44411
44412 return err;
44413 }
44414@@ -3625,7 +3625,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
44415 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
44416 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
44417 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
44418- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44419+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44420 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
44421
44422 return 0;
44423diff --git a/fs/fcntl.c b/fs/fcntl.c
44424index 75e7c1f..1eb3e4d 100644
44425--- a/fs/fcntl.c
44426+++ b/fs/fcntl.c
44427@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
44428 if (err)
44429 return err;
44430
44431+ if (gr_handle_chroot_fowner(pid, type))
44432+ return -ENOENT;
44433+ if (gr_check_protected_task_fowner(pid, type))
44434+ return -EACCES;
44435+
44436 f_modown(filp, pid, type, force);
44437 return 0;
44438 }
44439@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
44440
44441 static int f_setown_ex(struct file *filp, unsigned long arg)
44442 {
44443- struct f_owner_ex * __user owner_p = (void * __user)arg;
44444+ struct f_owner_ex __user *owner_p = (void __user *)arg;
44445 struct f_owner_ex owner;
44446 struct pid *pid;
44447 int type;
44448@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
44449
44450 static int f_getown_ex(struct file *filp, unsigned long arg)
44451 {
44452- struct f_owner_ex * __user owner_p = (void * __user)arg;
44453+ struct f_owner_ex __user *owner_p = (void __user *)arg;
44454 struct f_owner_ex owner;
44455 int ret = 0;
44456
44457@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
44458 switch (cmd) {
44459 case F_DUPFD:
44460 case F_DUPFD_CLOEXEC:
44461+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
44462 if (arg >= rlimit(RLIMIT_NOFILE))
44463 break;
44464 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
44465diff --git a/fs/fifo.c b/fs/fifo.c
44466index b1a524d..4ee270e 100644
44467--- a/fs/fifo.c
44468+++ b/fs/fifo.c
44469@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
44470 */
44471 filp->f_op = &read_pipefifo_fops;
44472 pipe->r_counter++;
44473- if (pipe->readers++ == 0)
44474+ if (atomic_inc_return(&pipe->readers) == 1)
44475 wake_up_partner(inode);
44476
44477- if (!pipe->writers) {
44478+ if (!atomic_read(&pipe->writers)) {
44479 if ((filp->f_flags & O_NONBLOCK)) {
44480 /* suppress POLLHUP until we have
44481 * seen a writer */
44482@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
44483 * errno=ENXIO when there is no process reading the FIFO.
44484 */
44485 ret = -ENXIO;
44486- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
44487+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
44488 goto err;
44489
44490 filp->f_op = &write_pipefifo_fops;
44491 pipe->w_counter++;
44492- if (!pipe->writers++)
44493+ if (atomic_inc_return(&pipe->writers) == 1)
44494 wake_up_partner(inode);
44495
44496- if (!pipe->readers) {
44497+ if (!atomic_read(&pipe->readers)) {
44498 wait_for_partner(inode, &pipe->r_counter);
44499 if (signal_pending(current))
44500 goto err_wr;
44501@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
44502 */
44503 filp->f_op = &rdwr_pipefifo_fops;
44504
44505- pipe->readers++;
44506- pipe->writers++;
44507+ atomic_inc(&pipe->readers);
44508+ atomic_inc(&pipe->writers);
44509 pipe->r_counter++;
44510 pipe->w_counter++;
44511- if (pipe->readers == 1 || pipe->writers == 1)
44512+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
44513 wake_up_partner(inode);
44514 break;
44515
44516@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
44517 return 0;
44518
44519 err_rd:
44520- if (!--pipe->readers)
44521+ if (atomic_dec_and_test(&pipe->readers))
44522 wake_up_interruptible(&pipe->wait);
44523 ret = -ERESTARTSYS;
44524 goto err;
44525
44526 err_wr:
44527- if (!--pipe->writers)
44528+ if (atomic_dec_and_test(&pipe->writers))
44529 wake_up_interruptible(&pipe->wait);
44530 ret = -ERESTARTSYS;
44531 goto err;
44532
44533 err:
44534- if (!pipe->readers && !pipe->writers)
44535+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
44536 free_pipe_info(inode);
44537
44538 err_nocleanup:
44539diff --git a/fs/file.c b/fs/file.c
44540index ba3f605..fade102 100644
44541--- a/fs/file.c
44542+++ b/fs/file.c
44543@@ -15,6 +15,7 @@
44544 #include <linux/slab.h>
44545 #include <linux/vmalloc.h>
44546 #include <linux/file.h>
44547+#include <linux/security.h>
44548 #include <linux/fdtable.h>
44549 #include <linux/bitops.h>
44550 #include <linux/interrupt.h>
44551@@ -255,6 +256,7 @@ int expand_files(struct files_struct *files, int nr)
44552 * N.B. For clone tasks sharing a files structure, this test
44553 * will limit the total number of files that can be opened.
44554 */
44555+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
44556 if (nr >= rlimit(RLIMIT_NOFILE))
44557 return -EMFILE;
44558
44559diff --git a/fs/filesystems.c b/fs/filesystems.c
44560index 96f2428..f5eeb8e 100644
44561--- a/fs/filesystems.c
44562+++ b/fs/filesystems.c
44563@@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
44564 int len = dot ? dot - name : strlen(name);
44565
44566 fs = __get_fs_type(name, len);
44567+
44568+#ifdef CONFIG_GRKERNSEC_MODHARDEN
44569+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
44570+#else
44571 if (!fs && (request_module("%.*s", len, name) == 0))
44572+#endif
44573 fs = __get_fs_type(name, len);
44574
44575 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
44576diff --git a/fs/fs_struct.c b/fs/fs_struct.c
44577index e159e68..e7d2a6f 100644
44578--- a/fs/fs_struct.c
44579+++ b/fs/fs_struct.c
44580@@ -4,6 +4,7 @@
44581 #include <linux/path.h>
44582 #include <linux/slab.h>
44583 #include <linux/fs_struct.h>
44584+#include <linux/grsecurity.h>
44585 #include "internal.h"
44586
44587 static inline void path_get_longterm(struct path *path)
44588@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
44589 write_seqcount_begin(&fs->seq);
44590 old_root = fs->root;
44591 fs->root = *path;
44592+ gr_set_chroot_entries(current, path);
44593 write_seqcount_end(&fs->seq);
44594 spin_unlock(&fs->lock);
44595 if (old_root.dentry)
44596@@ -65,6 +67,17 @@ static inline int replace_path(struct path *p, const struct path *old, const str
44597 return 1;
44598 }
44599
44600+static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
44601+{
44602+ if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
44603+ return 0;
44604+ *p = *new;
44605+
44606+ gr_set_chroot_entries(task, new);
44607+
44608+ return 1;
44609+}
44610+
44611 void chroot_fs_refs(struct path *old_root, struct path *new_root)
44612 {
44613 struct task_struct *g, *p;
44614@@ -79,7 +92,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
44615 int hits = 0;
44616 spin_lock(&fs->lock);
44617 write_seqcount_begin(&fs->seq);
44618- hits += replace_path(&fs->root, old_root, new_root);
44619+ hits += replace_root_path(p, &fs->root, old_root, new_root);
44620 hits += replace_path(&fs->pwd, old_root, new_root);
44621 write_seqcount_end(&fs->seq);
44622 while (hits--) {
44623@@ -111,7 +124,8 @@ void exit_fs(struct task_struct *tsk)
44624 task_lock(tsk);
44625 spin_lock(&fs->lock);
44626 tsk->fs = NULL;
44627- kill = !--fs->users;
44628+ gr_clear_chroot_entries(tsk);
44629+ kill = !atomic_dec_return(&fs->users);
44630 spin_unlock(&fs->lock);
44631 task_unlock(tsk);
44632 if (kill)
44633@@ -124,7 +138,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44634 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
44635 /* We don't need to lock fs - think why ;-) */
44636 if (fs) {
44637- fs->users = 1;
44638+ atomic_set(&fs->users, 1);
44639 fs->in_exec = 0;
44640 spin_lock_init(&fs->lock);
44641 seqcount_init(&fs->seq);
44642@@ -133,6 +147,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44643 spin_lock(&old->lock);
44644 fs->root = old->root;
44645 path_get_longterm(&fs->root);
44646+ /* instead of calling gr_set_chroot_entries here,
44647+ we call it from every caller of this function
44648+ */
44649 fs->pwd = old->pwd;
44650 path_get_longterm(&fs->pwd);
44651 spin_unlock(&old->lock);
44652@@ -151,8 +168,9 @@ int unshare_fs_struct(void)
44653
44654 task_lock(current);
44655 spin_lock(&fs->lock);
44656- kill = !--fs->users;
44657+ kill = !atomic_dec_return(&fs->users);
44658 current->fs = new_fs;
44659+ gr_set_chroot_entries(current, &new_fs->root);
44660 spin_unlock(&fs->lock);
44661 task_unlock(current);
44662
44663@@ -165,13 +183,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
44664
44665 int current_umask(void)
44666 {
44667- return current->fs->umask;
44668+ return current->fs->umask | gr_acl_umask();
44669 }
44670 EXPORT_SYMBOL(current_umask);
44671
44672 /* to be mentioned only in INIT_TASK */
44673 struct fs_struct init_fs = {
44674- .users = 1,
44675+ .users = ATOMIC_INIT(1),
44676 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
44677 .seq = SEQCNT_ZERO,
44678 .umask = 0022,
44679@@ -187,12 +205,13 @@ void daemonize_fs_struct(void)
44680 task_lock(current);
44681
44682 spin_lock(&init_fs.lock);
44683- init_fs.users++;
44684+ atomic_inc(&init_fs.users);
44685 spin_unlock(&init_fs.lock);
44686
44687 spin_lock(&fs->lock);
44688 current->fs = &init_fs;
44689- kill = !--fs->users;
44690+ gr_set_chroot_entries(current, &current->fs->root);
44691+ kill = !atomic_dec_return(&fs->users);
44692 spin_unlock(&fs->lock);
44693
44694 task_unlock(current);
44695diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
44696index 9905350..02eaec4 100644
44697--- a/fs/fscache/cookie.c
44698+++ b/fs/fscache/cookie.c
44699@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
44700 parent ? (char *) parent->def->name : "<no-parent>",
44701 def->name, netfs_data);
44702
44703- fscache_stat(&fscache_n_acquires);
44704+ fscache_stat_unchecked(&fscache_n_acquires);
44705
44706 /* if there's no parent cookie, then we don't create one here either */
44707 if (!parent) {
44708- fscache_stat(&fscache_n_acquires_null);
44709+ fscache_stat_unchecked(&fscache_n_acquires_null);
44710 _leave(" [no parent]");
44711 return NULL;
44712 }
44713@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
44714 /* allocate and initialise a cookie */
44715 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
44716 if (!cookie) {
44717- fscache_stat(&fscache_n_acquires_oom);
44718+ fscache_stat_unchecked(&fscache_n_acquires_oom);
44719 _leave(" [ENOMEM]");
44720 return NULL;
44721 }
44722@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44723
44724 switch (cookie->def->type) {
44725 case FSCACHE_COOKIE_TYPE_INDEX:
44726- fscache_stat(&fscache_n_cookie_index);
44727+ fscache_stat_unchecked(&fscache_n_cookie_index);
44728 break;
44729 case FSCACHE_COOKIE_TYPE_DATAFILE:
44730- fscache_stat(&fscache_n_cookie_data);
44731+ fscache_stat_unchecked(&fscache_n_cookie_data);
44732 break;
44733 default:
44734- fscache_stat(&fscache_n_cookie_special);
44735+ fscache_stat_unchecked(&fscache_n_cookie_special);
44736 break;
44737 }
44738
44739@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44740 if (fscache_acquire_non_index_cookie(cookie) < 0) {
44741 atomic_dec(&parent->n_children);
44742 __fscache_cookie_put(cookie);
44743- fscache_stat(&fscache_n_acquires_nobufs);
44744+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
44745 _leave(" = NULL");
44746 return NULL;
44747 }
44748 }
44749
44750- fscache_stat(&fscache_n_acquires_ok);
44751+ fscache_stat_unchecked(&fscache_n_acquires_ok);
44752 _leave(" = %p", cookie);
44753 return cookie;
44754 }
44755@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
44756 cache = fscache_select_cache_for_object(cookie->parent);
44757 if (!cache) {
44758 up_read(&fscache_addremove_sem);
44759- fscache_stat(&fscache_n_acquires_no_cache);
44760+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
44761 _leave(" = -ENOMEDIUM [no cache]");
44762 return -ENOMEDIUM;
44763 }
44764@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
44765 object = cache->ops->alloc_object(cache, cookie);
44766 fscache_stat_d(&fscache_n_cop_alloc_object);
44767 if (IS_ERR(object)) {
44768- fscache_stat(&fscache_n_object_no_alloc);
44769+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
44770 ret = PTR_ERR(object);
44771 goto error;
44772 }
44773
44774- fscache_stat(&fscache_n_object_alloc);
44775+ fscache_stat_unchecked(&fscache_n_object_alloc);
44776
44777 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
44778
44779@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
44780 struct fscache_object *object;
44781 struct hlist_node *_p;
44782
44783- fscache_stat(&fscache_n_updates);
44784+ fscache_stat_unchecked(&fscache_n_updates);
44785
44786 if (!cookie) {
44787- fscache_stat(&fscache_n_updates_null);
44788+ fscache_stat_unchecked(&fscache_n_updates_null);
44789 _leave(" [no cookie]");
44790 return;
44791 }
44792@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44793 struct fscache_object *object;
44794 unsigned long event;
44795
44796- fscache_stat(&fscache_n_relinquishes);
44797+ fscache_stat_unchecked(&fscache_n_relinquishes);
44798 if (retire)
44799- fscache_stat(&fscache_n_relinquishes_retire);
44800+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
44801
44802 if (!cookie) {
44803- fscache_stat(&fscache_n_relinquishes_null);
44804+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
44805 _leave(" [no cookie]");
44806 return;
44807 }
44808@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44809
44810 /* wait for the cookie to finish being instantiated (or to fail) */
44811 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
44812- fscache_stat(&fscache_n_relinquishes_waitcrt);
44813+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
44814 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
44815 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
44816 }
44817diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
44818index f6aad48..88dcf26 100644
44819--- a/fs/fscache/internal.h
44820+++ b/fs/fscache/internal.h
44821@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
44822 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
44823 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
44824
44825-extern atomic_t fscache_n_op_pend;
44826-extern atomic_t fscache_n_op_run;
44827-extern atomic_t fscache_n_op_enqueue;
44828-extern atomic_t fscache_n_op_deferred_release;
44829-extern atomic_t fscache_n_op_release;
44830-extern atomic_t fscache_n_op_gc;
44831-extern atomic_t fscache_n_op_cancelled;
44832-extern atomic_t fscache_n_op_rejected;
44833+extern atomic_unchecked_t fscache_n_op_pend;
44834+extern atomic_unchecked_t fscache_n_op_run;
44835+extern atomic_unchecked_t fscache_n_op_enqueue;
44836+extern atomic_unchecked_t fscache_n_op_deferred_release;
44837+extern atomic_unchecked_t fscache_n_op_release;
44838+extern atomic_unchecked_t fscache_n_op_gc;
44839+extern atomic_unchecked_t fscache_n_op_cancelled;
44840+extern atomic_unchecked_t fscache_n_op_rejected;
44841
44842-extern atomic_t fscache_n_attr_changed;
44843-extern atomic_t fscache_n_attr_changed_ok;
44844-extern atomic_t fscache_n_attr_changed_nobufs;
44845-extern atomic_t fscache_n_attr_changed_nomem;
44846-extern atomic_t fscache_n_attr_changed_calls;
44847+extern atomic_unchecked_t fscache_n_attr_changed;
44848+extern atomic_unchecked_t fscache_n_attr_changed_ok;
44849+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
44850+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
44851+extern atomic_unchecked_t fscache_n_attr_changed_calls;
44852
44853-extern atomic_t fscache_n_allocs;
44854-extern atomic_t fscache_n_allocs_ok;
44855-extern atomic_t fscache_n_allocs_wait;
44856-extern atomic_t fscache_n_allocs_nobufs;
44857-extern atomic_t fscache_n_allocs_intr;
44858-extern atomic_t fscache_n_allocs_object_dead;
44859-extern atomic_t fscache_n_alloc_ops;
44860-extern atomic_t fscache_n_alloc_op_waits;
44861+extern atomic_unchecked_t fscache_n_allocs;
44862+extern atomic_unchecked_t fscache_n_allocs_ok;
44863+extern atomic_unchecked_t fscache_n_allocs_wait;
44864+extern atomic_unchecked_t fscache_n_allocs_nobufs;
44865+extern atomic_unchecked_t fscache_n_allocs_intr;
44866+extern atomic_unchecked_t fscache_n_allocs_object_dead;
44867+extern atomic_unchecked_t fscache_n_alloc_ops;
44868+extern atomic_unchecked_t fscache_n_alloc_op_waits;
44869
44870-extern atomic_t fscache_n_retrievals;
44871-extern atomic_t fscache_n_retrievals_ok;
44872-extern atomic_t fscache_n_retrievals_wait;
44873-extern atomic_t fscache_n_retrievals_nodata;
44874-extern atomic_t fscache_n_retrievals_nobufs;
44875-extern atomic_t fscache_n_retrievals_intr;
44876-extern atomic_t fscache_n_retrievals_nomem;
44877-extern atomic_t fscache_n_retrievals_object_dead;
44878-extern atomic_t fscache_n_retrieval_ops;
44879-extern atomic_t fscache_n_retrieval_op_waits;
44880+extern atomic_unchecked_t fscache_n_retrievals;
44881+extern atomic_unchecked_t fscache_n_retrievals_ok;
44882+extern atomic_unchecked_t fscache_n_retrievals_wait;
44883+extern atomic_unchecked_t fscache_n_retrievals_nodata;
44884+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
44885+extern atomic_unchecked_t fscache_n_retrievals_intr;
44886+extern atomic_unchecked_t fscache_n_retrievals_nomem;
44887+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
44888+extern atomic_unchecked_t fscache_n_retrieval_ops;
44889+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
44890
44891-extern atomic_t fscache_n_stores;
44892-extern atomic_t fscache_n_stores_ok;
44893-extern atomic_t fscache_n_stores_again;
44894-extern atomic_t fscache_n_stores_nobufs;
44895-extern atomic_t fscache_n_stores_oom;
44896-extern atomic_t fscache_n_store_ops;
44897-extern atomic_t fscache_n_store_calls;
44898-extern atomic_t fscache_n_store_pages;
44899-extern atomic_t fscache_n_store_radix_deletes;
44900-extern atomic_t fscache_n_store_pages_over_limit;
44901+extern atomic_unchecked_t fscache_n_stores;
44902+extern atomic_unchecked_t fscache_n_stores_ok;
44903+extern atomic_unchecked_t fscache_n_stores_again;
44904+extern atomic_unchecked_t fscache_n_stores_nobufs;
44905+extern atomic_unchecked_t fscache_n_stores_oom;
44906+extern atomic_unchecked_t fscache_n_store_ops;
44907+extern atomic_unchecked_t fscache_n_store_calls;
44908+extern atomic_unchecked_t fscache_n_store_pages;
44909+extern atomic_unchecked_t fscache_n_store_radix_deletes;
44910+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
44911
44912-extern atomic_t fscache_n_store_vmscan_not_storing;
44913-extern atomic_t fscache_n_store_vmscan_gone;
44914-extern atomic_t fscache_n_store_vmscan_busy;
44915-extern atomic_t fscache_n_store_vmscan_cancelled;
44916+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
44917+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
44918+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
44919+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
44920
44921-extern atomic_t fscache_n_marks;
44922-extern atomic_t fscache_n_uncaches;
44923+extern atomic_unchecked_t fscache_n_marks;
44924+extern atomic_unchecked_t fscache_n_uncaches;
44925
44926-extern atomic_t fscache_n_acquires;
44927-extern atomic_t fscache_n_acquires_null;
44928-extern atomic_t fscache_n_acquires_no_cache;
44929-extern atomic_t fscache_n_acquires_ok;
44930-extern atomic_t fscache_n_acquires_nobufs;
44931-extern atomic_t fscache_n_acquires_oom;
44932+extern atomic_unchecked_t fscache_n_acquires;
44933+extern atomic_unchecked_t fscache_n_acquires_null;
44934+extern atomic_unchecked_t fscache_n_acquires_no_cache;
44935+extern atomic_unchecked_t fscache_n_acquires_ok;
44936+extern atomic_unchecked_t fscache_n_acquires_nobufs;
44937+extern atomic_unchecked_t fscache_n_acquires_oom;
44938
44939-extern atomic_t fscache_n_updates;
44940-extern atomic_t fscache_n_updates_null;
44941-extern atomic_t fscache_n_updates_run;
44942+extern atomic_unchecked_t fscache_n_updates;
44943+extern atomic_unchecked_t fscache_n_updates_null;
44944+extern atomic_unchecked_t fscache_n_updates_run;
44945
44946-extern atomic_t fscache_n_relinquishes;
44947-extern atomic_t fscache_n_relinquishes_null;
44948-extern atomic_t fscache_n_relinquishes_waitcrt;
44949-extern atomic_t fscache_n_relinquishes_retire;
44950+extern atomic_unchecked_t fscache_n_relinquishes;
44951+extern atomic_unchecked_t fscache_n_relinquishes_null;
44952+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
44953+extern atomic_unchecked_t fscache_n_relinquishes_retire;
44954
44955-extern atomic_t fscache_n_cookie_index;
44956-extern atomic_t fscache_n_cookie_data;
44957-extern atomic_t fscache_n_cookie_special;
44958+extern atomic_unchecked_t fscache_n_cookie_index;
44959+extern atomic_unchecked_t fscache_n_cookie_data;
44960+extern atomic_unchecked_t fscache_n_cookie_special;
44961
44962-extern atomic_t fscache_n_object_alloc;
44963-extern atomic_t fscache_n_object_no_alloc;
44964-extern atomic_t fscache_n_object_lookups;
44965-extern atomic_t fscache_n_object_lookups_negative;
44966-extern atomic_t fscache_n_object_lookups_positive;
44967-extern atomic_t fscache_n_object_lookups_timed_out;
44968-extern atomic_t fscache_n_object_created;
44969-extern atomic_t fscache_n_object_avail;
44970-extern atomic_t fscache_n_object_dead;
44971+extern atomic_unchecked_t fscache_n_object_alloc;
44972+extern atomic_unchecked_t fscache_n_object_no_alloc;
44973+extern atomic_unchecked_t fscache_n_object_lookups;
44974+extern atomic_unchecked_t fscache_n_object_lookups_negative;
44975+extern atomic_unchecked_t fscache_n_object_lookups_positive;
44976+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
44977+extern atomic_unchecked_t fscache_n_object_created;
44978+extern atomic_unchecked_t fscache_n_object_avail;
44979+extern atomic_unchecked_t fscache_n_object_dead;
44980
44981-extern atomic_t fscache_n_checkaux_none;
44982-extern atomic_t fscache_n_checkaux_okay;
44983-extern atomic_t fscache_n_checkaux_update;
44984-extern atomic_t fscache_n_checkaux_obsolete;
44985+extern atomic_unchecked_t fscache_n_checkaux_none;
44986+extern atomic_unchecked_t fscache_n_checkaux_okay;
44987+extern atomic_unchecked_t fscache_n_checkaux_update;
44988+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
44989
44990 extern atomic_t fscache_n_cop_alloc_object;
44991 extern atomic_t fscache_n_cop_lookup_object;
44992@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
44993 atomic_inc(stat);
44994 }
44995
44996+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
44997+{
44998+ atomic_inc_unchecked(stat);
44999+}
45000+
45001 static inline void fscache_stat_d(atomic_t *stat)
45002 {
45003 atomic_dec(stat);
45004@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
45005
45006 #define __fscache_stat(stat) (NULL)
45007 #define fscache_stat(stat) do {} while (0)
45008+#define fscache_stat_unchecked(stat) do {} while (0)
45009 #define fscache_stat_d(stat) do {} while (0)
45010 #endif
45011
45012diff --git a/fs/fscache/object.c b/fs/fscache/object.c
45013index b6b897c..0ffff9c 100644
45014--- a/fs/fscache/object.c
45015+++ b/fs/fscache/object.c
45016@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45017 /* update the object metadata on disk */
45018 case FSCACHE_OBJECT_UPDATING:
45019 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
45020- fscache_stat(&fscache_n_updates_run);
45021+ fscache_stat_unchecked(&fscache_n_updates_run);
45022 fscache_stat(&fscache_n_cop_update_object);
45023 object->cache->ops->update_object(object);
45024 fscache_stat_d(&fscache_n_cop_update_object);
45025@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45026 spin_lock(&object->lock);
45027 object->state = FSCACHE_OBJECT_DEAD;
45028 spin_unlock(&object->lock);
45029- fscache_stat(&fscache_n_object_dead);
45030+ fscache_stat_unchecked(&fscache_n_object_dead);
45031 goto terminal_transit;
45032
45033 /* handle the parent cache of this object being withdrawn from
45034@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45035 spin_lock(&object->lock);
45036 object->state = FSCACHE_OBJECT_DEAD;
45037 spin_unlock(&object->lock);
45038- fscache_stat(&fscache_n_object_dead);
45039+ fscache_stat_unchecked(&fscache_n_object_dead);
45040 goto terminal_transit;
45041
45042 /* complain about the object being woken up once it is
45043@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45044 parent->cookie->def->name, cookie->def->name,
45045 object->cache->tag->name);
45046
45047- fscache_stat(&fscache_n_object_lookups);
45048+ fscache_stat_unchecked(&fscache_n_object_lookups);
45049 fscache_stat(&fscache_n_cop_lookup_object);
45050 ret = object->cache->ops->lookup_object(object);
45051 fscache_stat_d(&fscache_n_cop_lookup_object);
45052@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45053 if (ret == -ETIMEDOUT) {
45054 /* probably stuck behind another object, so move this one to
45055 * the back of the queue */
45056- fscache_stat(&fscache_n_object_lookups_timed_out);
45057+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
45058 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45059 }
45060
45061@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
45062
45063 spin_lock(&object->lock);
45064 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45065- fscache_stat(&fscache_n_object_lookups_negative);
45066+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
45067
45068 /* transit here to allow write requests to begin stacking up
45069 * and read requests to begin returning ENODATA */
45070@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
45071 * result, in which case there may be data available */
45072 spin_lock(&object->lock);
45073 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45074- fscache_stat(&fscache_n_object_lookups_positive);
45075+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
45076
45077 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
45078
45079@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
45080 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45081 } else {
45082 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
45083- fscache_stat(&fscache_n_object_created);
45084+ fscache_stat_unchecked(&fscache_n_object_created);
45085
45086 object->state = FSCACHE_OBJECT_AVAILABLE;
45087 spin_unlock(&object->lock);
45088@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
45089 fscache_enqueue_dependents(object);
45090
45091 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
45092- fscache_stat(&fscache_n_object_avail);
45093+ fscache_stat_unchecked(&fscache_n_object_avail);
45094
45095 _leave("");
45096 }
45097@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45098 enum fscache_checkaux result;
45099
45100 if (!object->cookie->def->check_aux) {
45101- fscache_stat(&fscache_n_checkaux_none);
45102+ fscache_stat_unchecked(&fscache_n_checkaux_none);
45103 return FSCACHE_CHECKAUX_OKAY;
45104 }
45105
45106@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45107 switch (result) {
45108 /* entry okay as is */
45109 case FSCACHE_CHECKAUX_OKAY:
45110- fscache_stat(&fscache_n_checkaux_okay);
45111+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
45112 break;
45113
45114 /* entry requires update */
45115 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
45116- fscache_stat(&fscache_n_checkaux_update);
45117+ fscache_stat_unchecked(&fscache_n_checkaux_update);
45118 break;
45119
45120 /* entry requires deletion */
45121 case FSCACHE_CHECKAUX_OBSOLETE:
45122- fscache_stat(&fscache_n_checkaux_obsolete);
45123+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
45124 break;
45125
45126 default:
45127diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
45128index 30afdfa..2256596 100644
45129--- a/fs/fscache/operation.c
45130+++ b/fs/fscache/operation.c
45131@@ -17,7 +17,7 @@
45132 #include <linux/slab.h>
45133 #include "internal.h"
45134
45135-atomic_t fscache_op_debug_id;
45136+atomic_unchecked_t fscache_op_debug_id;
45137 EXPORT_SYMBOL(fscache_op_debug_id);
45138
45139 /**
45140@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
45141 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
45142 ASSERTCMP(atomic_read(&op->usage), >, 0);
45143
45144- fscache_stat(&fscache_n_op_enqueue);
45145+ fscache_stat_unchecked(&fscache_n_op_enqueue);
45146 switch (op->flags & FSCACHE_OP_TYPE) {
45147 case FSCACHE_OP_ASYNC:
45148 _debug("queue async");
45149@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
45150 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
45151 if (op->processor)
45152 fscache_enqueue_operation(op);
45153- fscache_stat(&fscache_n_op_run);
45154+ fscache_stat_unchecked(&fscache_n_op_run);
45155 }
45156
45157 /*
45158@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45159 if (object->n_ops > 1) {
45160 atomic_inc(&op->usage);
45161 list_add_tail(&op->pend_link, &object->pending_ops);
45162- fscache_stat(&fscache_n_op_pend);
45163+ fscache_stat_unchecked(&fscache_n_op_pend);
45164 } else if (!list_empty(&object->pending_ops)) {
45165 atomic_inc(&op->usage);
45166 list_add_tail(&op->pend_link, &object->pending_ops);
45167- fscache_stat(&fscache_n_op_pend);
45168+ fscache_stat_unchecked(&fscache_n_op_pend);
45169 fscache_start_operations(object);
45170 } else {
45171 ASSERTCMP(object->n_in_progress, ==, 0);
45172@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45173 object->n_exclusive++; /* reads and writes must wait */
45174 atomic_inc(&op->usage);
45175 list_add_tail(&op->pend_link, &object->pending_ops);
45176- fscache_stat(&fscache_n_op_pend);
45177+ fscache_stat_unchecked(&fscache_n_op_pend);
45178 ret = 0;
45179 } else {
45180 /* not allowed to submit ops in any other state */
45181@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
45182 if (object->n_exclusive > 0) {
45183 atomic_inc(&op->usage);
45184 list_add_tail(&op->pend_link, &object->pending_ops);
45185- fscache_stat(&fscache_n_op_pend);
45186+ fscache_stat_unchecked(&fscache_n_op_pend);
45187 } else if (!list_empty(&object->pending_ops)) {
45188 atomic_inc(&op->usage);
45189 list_add_tail(&op->pend_link, &object->pending_ops);
45190- fscache_stat(&fscache_n_op_pend);
45191+ fscache_stat_unchecked(&fscache_n_op_pend);
45192 fscache_start_operations(object);
45193 } else {
45194 ASSERTCMP(object->n_exclusive, ==, 0);
45195@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
45196 object->n_ops++;
45197 atomic_inc(&op->usage);
45198 list_add_tail(&op->pend_link, &object->pending_ops);
45199- fscache_stat(&fscache_n_op_pend);
45200+ fscache_stat_unchecked(&fscache_n_op_pend);
45201 ret = 0;
45202 } else if (object->state == FSCACHE_OBJECT_DYING ||
45203 object->state == FSCACHE_OBJECT_LC_DYING ||
45204 object->state == FSCACHE_OBJECT_WITHDRAWING) {
45205- fscache_stat(&fscache_n_op_rejected);
45206+ fscache_stat_unchecked(&fscache_n_op_rejected);
45207 ret = -ENOBUFS;
45208 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
45209 fscache_report_unexpected_submission(object, op, ostate);
45210@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
45211
45212 ret = -EBUSY;
45213 if (!list_empty(&op->pend_link)) {
45214- fscache_stat(&fscache_n_op_cancelled);
45215+ fscache_stat_unchecked(&fscache_n_op_cancelled);
45216 list_del_init(&op->pend_link);
45217 object->n_ops--;
45218 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
45219@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
45220 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
45221 BUG();
45222
45223- fscache_stat(&fscache_n_op_release);
45224+ fscache_stat_unchecked(&fscache_n_op_release);
45225
45226 if (op->release) {
45227 op->release(op);
45228@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
45229 * lock, and defer it otherwise */
45230 if (!spin_trylock(&object->lock)) {
45231 _debug("defer put");
45232- fscache_stat(&fscache_n_op_deferred_release);
45233+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
45234
45235 cache = object->cache;
45236 spin_lock(&cache->op_gc_list_lock);
45237@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
45238
45239 _debug("GC DEFERRED REL OBJ%x OP%x",
45240 object->debug_id, op->debug_id);
45241- fscache_stat(&fscache_n_op_gc);
45242+ fscache_stat_unchecked(&fscache_n_op_gc);
45243
45244 ASSERTCMP(atomic_read(&op->usage), ==, 0);
45245
45246diff --git a/fs/fscache/page.c b/fs/fscache/page.c
45247index 3f7a59b..cf196cc 100644
45248--- a/fs/fscache/page.c
45249+++ b/fs/fscache/page.c
45250@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45251 val = radix_tree_lookup(&cookie->stores, page->index);
45252 if (!val) {
45253 rcu_read_unlock();
45254- fscache_stat(&fscache_n_store_vmscan_not_storing);
45255+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
45256 __fscache_uncache_page(cookie, page);
45257 return true;
45258 }
45259@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45260 spin_unlock(&cookie->stores_lock);
45261
45262 if (xpage) {
45263- fscache_stat(&fscache_n_store_vmscan_cancelled);
45264- fscache_stat(&fscache_n_store_radix_deletes);
45265+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
45266+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45267 ASSERTCMP(xpage, ==, page);
45268 } else {
45269- fscache_stat(&fscache_n_store_vmscan_gone);
45270+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
45271 }
45272
45273 wake_up_bit(&cookie->flags, 0);
45274@@ -107,7 +107,7 @@ page_busy:
45275 /* we might want to wait here, but that could deadlock the allocator as
45276 * the work threads writing to the cache may all end up sleeping
45277 * on memory allocation */
45278- fscache_stat(&fscache_n_store_vmscan_busy);
45279+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
45280 return false;
45281 }
45282 EXPORT_SYMBOL(__fscache_maybe_release_page);
45283@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
45284 FSCACHE_COOKIE_STORING_TAG);
45285 if (!radix_tree_tag_get(&cookie->stores, page->index,
45286 FSCACHE_COOKIE_PENDING_TAG)) {
45287- fscache_stat(&fscache_n_store_radix_deletes);
45288+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45289 xpage = radix_tree_delete(&cookie->stores, page->index);
45290 }
45291 spin_unlock(&cookie->stores_lock);
45292@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
45293
45294 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
45295
45296- fscache_stat(&fscache_n_attr_changed_calls);
45297+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
45298
45299 if (fscache_object_is_active(object)) {
45300 fscache_stat(&fscache_n_cop_attr_changed);
45301@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45302
45303 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45304
45305- fscache_stat(&fscache_n_attr_changed);
45306+ fscache_stat_unchecked(&fscache_n_attr_changed);
45307
45308 op = kzalloc(sizeof(*op), GFP_KERNEL);
45309 if (!op) {
45310- fscache_stat(&fscache_n_attr_changed_nomem);
45311+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
45312 _leave(" = -ENOMEM");
45313 return -ENOMEM;
45314 }
45315@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45316 if (fscache_submit_exclusive_op(object, op) < 0)
45317 goto nobufs;
45318 spin_unlock(&cookie->lock);
45319- fscache_stat(&fscache_n_attr_changed_ok);
45320+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
45321 fscache_put_operation(op);
45322 _leave(" = 0");
45323 return 0;
45324@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45325 nobufs:
45326 spin_unlock(&cookie->lock);
45327 kfree(op);
45328- fscache_stat(&fscache_n_attr_changed_nobufs);
45329+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
45330 _leave(" = %d", -ENOBUFS);
45331 return -ENOBUFS;
45332 }
45333@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
45334 /* allocate a retrieval operation and attempt to submit it */
45335 op = kzalloc(sizeof(*op), GFP_NOIO);
45336 if (!op) {
45337- fscache_stat(&fscache_n_retrievals_nomem);
45338+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45339 return NULL;
45340 }
45341
45342@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45343 return 0;
45344 }
45345
45346- fscache_stat(&fscache_n_retrievals_wait);
45347+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
45348
45349 jif = jiffies;
45350 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
45351 fscache_wait_bit_interruptible,
45352 TASK_INTERRUPTIBLE) != 0) {
45353- fscache_stat(&fscache_n_retrievals_intr);
45354+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
45355 _leave(" = -ERESTARTSYS");
45356 return -ERESTARTSYS;
45357 }
45358@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45359 */
45360 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45361 struct fscache_retrieval *op,
45362- atomic_t *stat_op_waits,
45363- atomic_t *stat_object_dead)
45364+ atomic_unchecked_t *stat_op_waits,
45365+ atomic_unchecked_t *stat_object_dead)
45366 {
45367 int ret;
45368
45369@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45370 goto check_if_dead;
45371
45372 _debug(">>> WT");
45373- fscache_stat(stat_op_waits);
45374+ fscache_stat_unchecked(stat_op_waits);
45375 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
45376 fscache_wait_bit_interruptible,
45377 TASK_INTERRUPTIBLE) < 0) {
45378@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45379
45380 check_if_dead:
45381 if (unlikely(fscache_object_is_dead(object))) {
45382- fscache_stat(stat_object_dead);
45383+ fscache_stat_unchecked(stat_object_dead);
45384 return -ENOBUFS;
45385 }
45386 return 0;
45387@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45388
45389 _enter("%p,%p,,,", cookie, page);
45390
45391- fscache_stat(&fscache_n_retrievals);
45392+ fscache_stat_unchecked(&fscache_n_retrievals);
45393
45394 if (hlist_empty(&cookie->backing_objects))
45395 goto nobufs;
45396@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45397 goto nobufs_unlock;
45398 spin_unlock(&cookie->lock);
45399
45400- fscache_stat(&fscache_n_retrieval_ops);
45401+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
45402
45403 /* pin the netfs read context in case we need to do the actual netfs
45404 * read because we've encountered a cache read failure */
45405@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45406
45407 error:
45408 if (ret == -ENOMEM)
45409- fscache_stat(&fscache_n_retrievals_nomem);
45410+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45411 else if (ret == -ERESTARTSYS)
45412- fscache_stat(&fscache_n_retrievals_intr);
45413+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
45414 else if (ret == -ENODATA)
45415- fscache_stat(&fscache_n_retrievals_nodata);
45416+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45417 else if (ret < 0)
45418- fscache_stat(&fscache_n_retrievals_nobufs);
45419+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45420 else
45421- fscache_stat(&fscache_n_retrievals_ok);
45422+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
45423
45424 fscache_put_retrieval(op);
45425 _leave(" = %d", ret);
45426@@ -429,7 +429,7 @@ nobufs_unlock:
45427 spin_unlock(&cookie->lock);
45428 kfree(op);
45429 nobufs:
45430- fscache_stat(&fscache_n_retrievals_nobufs);
45431+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45432 _leave(" = -ENOBUFS");
45433 return -ENOBUFS;
45434 }
45435@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45436
45437 _enter("%p,,%d,,,", cookie, *nr_pages);
45438
45439- fscache_stat(&fscache_n_retrievals);
45440+ fscache_stat_unchecked(&fscache_n_retrievals);
45441
45442 if (hlist_empty(&cookie->backing_objects))
45443 goto nobufs;
45444@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45445 goto nobufs_unlock;
45446 spin_unlock(&cookie->lock);
45447
45448- fscache_stat(&fscache_n_retrieval_ops);
45449+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
45450
45451 /* pin the netfs read context in case we need to do the actual netfs
45452 * read because we've encountered a cache read failure */
45453@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45454
45455 error:
45456 if (ret == -ENOMEM)
45457- fscache_stat(&fscache_n_retrievals_nomem);
45458+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45459 else if (ret == -ERESTARTSYS)
45460- fscache_stat(&fscache_n_retrievals_intr);
45461+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
45462 else if (ret == -ENODATA)
45463- fscache_stat(&fscache_n_retrievals_nodata);
45464+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45465 else if (ret < 0)
45466- fscache_stat(&fscache_n_retrievals_nobufs);
45467+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45468 else
45469- fscache_stat(&fscache_n_retrievals_ok);
45470+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
45471
45472 fscache_put_retrieval(op);
45473 _leave(" = %d", ret);
45474@@ -545,7 +545,7 @@ nobufs_unlock:
45475 spin_unlock(&cookie->lock);
45476 kfree(op);
45477 nobufs:
45478- fscache_stat(&fscache_n_retrievals_nobufs);
45479+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45480 _leave(" = -ENOBUFS");
45481 return -ENOBUFS;
45482 }
45483@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45484
45485 _enter("%p,%p,,,", cookie, page);
45486
45487- fscache_stat(&fscache_n_allocs);
45488+ fscache_stat_unchecked(&fscache_n_allocs);
45489
45490 if (hlist_empty(&cookie->backing_objects))
45491 goto nobufs;
45492@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45493 goto nobufs_unlock;
45494 spin_unlock(&cookie->lock);
45495
45496- fscache_stat(&fscache_n_alloc_ops);
45497+ fscache_stat_unchecked(&fscache_n_alloc_ops);
45498
45499 ret = fscache_wait_for_retrieval_activation(
45500 object, op,
45501@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45502
45503 error:
45504 if (ret == -ERESTARTSYS)
45505- fscache_stat(&fscache_n_allocs_intr);
45506+ fscache_stat_unchecked(&fscache_n_allocs_intr);
45507 else if (ret < 0)
45508- fscache_stat(&fscache_n_allocs_nobufs);
45509+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45510 else
45511- fscache_stat(&fscache_n_allocs_ok);
45512+ fscache_stat_unchecked(&fscache_n_allocs_ok);
45513
45514 fscache_put_retrieval(op);
45515 _leave(" = %d", ret);
45516@@ -625,7 +625,7 @@ nobufs_unlock:
45517 spin_unlock(&cookie->lock);
45518 kfree(op);
45519 nobufs:
45520- fscache_stat(&fscache_n_allocs_nobufs);
45521+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45522 _leave(" = -ENOBUFS");
45523 return -ENOBUFS;
45524 }
45525@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45526
45527 spin_lock(&cookie->stores_lock);
45528
45529- fscache_stat(&fscache_n_store_calls);
45530+ fscache_stat_unchecked(&fscache_n_store_calls);
45531
45532 /* find a page to store */
45533 page = NULL;
45534@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45535 page = results[0];
45536 _debug("gang %d [%lx]", n, page->index);
45537 if (page->index > op->store_limit) {
45538- fscache_stat(&fscache_n_store_pages_over_limit);
45539+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
45540 goto superseded;
45541 }
45542
45543@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45544 spin_unlock(&cookie->stores_lock);
45545 spin_unlock(&object->lock);
45546
45547- fscache_stat(&fscache_n_store_pages);
45548+ fscache_stat_unchecked(&fscache_n_store_pages);
45549 fscache_stat(&fscache_n_cop_write_page);
45550 ret = object->cache->ops->write_page(op, page);
45551 fscache_stat_d(&fscache_n_cop_write_page);
45552@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45553 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45554 ASSERT(PageFsCache(page));
45555
45556- fscache_stat(&fscache_n_stores);
45557+ fscache_stat_unchecked(&fscache_n_stores);
45558
45559 op = kzalloc(sizeof(*op), GFP_NOIO);
45560 if (!op)
45561@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45562 spin_unlock(&cookie->stores_lock);
45563 spin_unlock(&object->lock);
45564
45565- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
45566+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
45567 op->store_limit = object->store_limit;
45568
45569 if (fscache_submit_op(object, &op->op) < 0)
45570@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45571
45572 spin_unlock(&cookie->lock);
45573 radix_tree_preload_end();
45574- fscache_stat(&fscache_n_store_ops);
45575- fscache_stat(&fscache_n_stores_ok);
45576+ fscache_stat_unchecked(&fscache_n_store_ops);
45577+ fscache_stat_unchecked(&fscache_n_stores_ok);
45578
45579 /* the work queue now carries its own ref on the object */
45580 fscache_put_operation(&op->op);
45581@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45582 return 0;
45583
45584 already_queued:
45585- fscache_stat(&fscache_n_stores_again);
45586+ fscache_stat_unchecked(&fscache_n_stores_again);
45587 already_pending:
45588 spin_unlock(&cookie->stores_lock);
45589 spin_unlock(&object->lock);
45590 spin_unlock(&cookie->lock);
45591 radix_tree_preload_end();
45592 kfree(op);
45593- fscache_stat(&fscache_n_stores_ok);
45594+ fscache_stat_unchecked(&fscache_n_stores_ok);
45595 _leave(" = 0");
45596 return 0;
45597
45598@@ -851,14 +851,14 @@ nobufs:
45599 spin_unlock(&cookie->lock);
45600 radix_tree_preload_end();
45601 kfree(op);
45602- fscache_stat(&fscache_n_stores_nobufs);
45603+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
45604 _leave(" = -ENOBUFS");
45605 return -ENOBUFS;
45606
45607 nomem_free:
45608 kfree(op);
45609 nomem:
45610- fscache_stat(&fscache_n_stores_oom);
45611+ fscache_stat_unchecked(&fscache_n_stores_oom);
45612 _leave(" = -ENOMEM");
45613 return -ENOMEM;
45614 }
45615@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
45616 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45617 ASSERTCMP(page, !=, NULL);
45618
45619- fscache_stat(&fscache_n_uncaches);
45620+ fscache_stat_unchecked(&fscache_n_uncaches);
45621
45622 /* cache withdrawal may beat us to it */
45623 if (!PageFsCache(page))
45624@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
45625 unsigned long loop;
45626
45627 #ifdef CONFIG_FSCACHE_STATS
45628- atomic_add(pagevec->nr, &fscache_n_marks);
45629+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
45630 #endif
45631
45632 for (loop = 0; loop < pagevec->nr; loop++) {
45633diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
45634index 4765190..2a067f2 100644
45635--- a/fs/fscache/stats.c
45636+++ b/fs/fscache/stats.c
45637@@ -18,95 +18,95 @@
45638 /*
45639 * operation counters
45640 */
45641-atomic_t fscache_n_op_pend;
45642-atomic_t fscache_n_op_run;
45643-atomic_t fscache_n_op_enqueue;
45644-atomic_t fscache_n_op_requeue;
45645-atomic_t fscache_n_op_deferred_release;
45646-atomic_t fscache_n_op_release;
45647-atomic_t fscache_n_op_gc;
45648-atomic_t fscache_n_op_cancelled;
45649-atomic_t fscache_n_op_rejected;
45650+atomic_unchecked_t fscache_n_op_pend;
45651+atomic_unchecked_t fscache_n_op_run;
45652+atomic_unchecked_t fscache_n_op_enqueue;
45653+atomic_unchecked_t fscache_n_op_requeue;
45654+atomic_unchecked_t fscache_n_op_deferred_release;
45655+atomic_unchecked_t fscache_n_op_release;
45656+atomic_unchecked_t fscache_n_op_gc;
45657+atomic_unchecked_t fscache_n_op_cancelled;
45658+atomic_unchecked_t fscache_n_op_rejected;
45659
45660-atomic_t fscache_n_attr_changed;
45661-atomic_t fscache_n_attr_changed_ok;
45662-atomic_t fscache_n_attr_changed_nobufs;
45663-atomic_t fscache_n_attr_changed_nomem;
45664-atomic_t fscache_n_attr_changed_calls;
45665+atomic_unchecked_t fscache_n_attr_changed;
45666+atomic_unchecked_t fscache_n_attr_changed_ok;
45667+atomic_unchecked_t fscache_n_attr_changed_nobufs;
45668+atomic_unchecked_t fscache_n_attr_changed_nomem;
45669+atomic_unchecked_t fscache_n_attr_changed_calls;
45670
45671-atomic_t fscache_n_allocs;
45672-atomic_t fscache_n_allocs_ok;
45673-atomic_t fscache_n_allocs_wait;
45674-atomic_t fscache_n_allocs_nobufs;
45675-atomic_t fscache_n_allocs_intr;
45676-atomic_t fscache_n_allocs_object_dead;
45677-atomic_t fscache_n_alloc_ops;
45678-atomic_t fscache_n_alloc_op_waits;
45679+atomic_unchecked_t fscache_n_allocs;
45680+atomic_unchecked_t fscache_n_allocs_ok;
45681+atomic_unchecked_t fscache_n_allocs_wait;
45682+atomic_unchecked_t fscache_n_allocs_nobufs;
45683+atomic_unchecked_t fscache_n_allocs_intr;
45684+atomic_unchecked_t fscache_n_allocs_object_dead;
45685+atomic_unchecked_t fscache_n_alloc_ops;
45686+atomic_unchecked_t fscache_n_alloc_op_waits;
45687
45688-atomic_t fscache_n_retrievals;
45689-atomic_t fscache_n_retrievals_ok;
45690-atomic_t fscache_n_retrievals_wait;
45691-atomic_t fscache_n_retrievals_nodata;
45692-atomic_t fscache_n_retrievals_nobufs;
45693-atomic_t fscache_n_retrievals_intr;
45694-atomic_t fscache_n_retrievals_nomem;
45695-atomic_t fscache_n_retrievals_object_dead;
45696-atomic_t fscache_n_retrieval_ops;
45697-atomic_t fscache_n_retrieval_op_waits;
45698+atomic_unchecked_t fscache_n_retrievals;
45699+atomic_unchecked_t fscache_n_retrievals_ok;
45700+atomic_unchecked_t fscache_n_retrievals_wait;
45701+atomic_unchecked_t fscache_n_retrievals_nodata;
45702+atomic_unchecked_t fscache_n_retrievals_nobufs;
45703+atomic_unchecked_t fscache_n_retrievals_intr;
45704+atomic_unchecked_t fscache_n_retrievals_nomem;
45705+atomic_unchecked_t fscache_n_retrievals_object_dead;
45706+atomic_unchecked_t fscache_n_retrieval_ops;
45707+atomic_unchecked_t fscache_n_retrieval_op_waits;
45708
45709-atomic_t fscache_n_stores;
45710-atomic_t fscache_n_stores_ok;
45711-atomic_t fscache_n_stores_again;
45712-atomic_t fscache_n_stores_nobufs;
45713-atomic_t fscache_n_stores_oom;
45714-atomic_t fscache_n_store_ops;
45715-atomic_t fscache_n_store_calls;
45716-atomic_t fscache_n_store_pages;
45717-atomic_t fscache_n_store_radix_deletes;
45718-atomic_t fscache_n_store_pages_over_limit;
45719+atomic_unchecked_t fscache_n_stores;
45720+atomic_unchecked_t fscache_n_stores_ok;
45721+atomic_unchecked_t fscache_n_stores_again;
45722+atomic_unchecked_t fscache_n_stores_nobufs;
45723+atomic_unchecked_t fscache_n_stores_oom;
45724+atomic_unchecked_t fscache_n_store_ops;
45725+atomic_unchecked_t fscache_n_store_calls;
45726+atomic_unchecked_t fscache_n_store_pages;
45727+atomic_unchecked_t fscache_n_store_radix_deletes;
45728+atomic_unchecked_t fscache_n_store_pages_over_limit;
45729
45730-atomic_t fscache_n_store_vmscan_not_storing;
45731-atomic_t fscache_n_store_vmscan_gone;
45732-atomic_t fscache_n_store_vmscan_busy;
45733-atomic_t fscache_n_store_vmscan_cancelled;
45734+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45735+atomic_unchecked_t fscache_n_store_vmscan_gone;
45736+atomic_unchecked_t fscache_n_store_vmscan_busy;
45737+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45738
45739-atomic_t fscache_n_marks;
45740-atomic_t fscache_n_uncaches;
45741+atomic_unchecked_t fscache_n_marks;
45742+atomic_unchecked_t fscache_n_uncaches;
45743
45744-atomic_t fscache_n_acquires;
45745-atomic_t fscache_n_acquires_null;
45746-atomic_t fscache_n_acquires_no_cache;
45747-atomic_t fscache_n_acquires_ok;
45748-atomic_t fscache_n_acquires_nobufs;
45749-atomic_t fscache_n_acquires_oom;
45750+atomic_unchecked_t fscache_n_acquires;
45751+atomic_unchecked_t fscache_n_acquires_null;
45752+atomic_unchecked_t fscache_n_acquires_no_cache;
45753+atomic_unchecked_t fscache_n_acquires_ok;
45754+atomic_unchecked_t fscache_n_acquires_nobufs;
45755+atomic_unchecked_t fscache_n_acquires_oom;
45756
45757-atomic_t fscache_n_updates;
45758-atomic_t fscache_n_updates_null;
45759-atomic_t fscache_n_updates_run;
45760+atomic_unchecked_t fscache_n_updates;
45761+atomic_unchecked_t fscache_n_updates_null;
45762+atomic_unchecked_t fscache_n_updates_run;
45763
45764-atomic_t fscache_n_relinquishes;
45765-atomic_t fscache_n_relinquishes_null;
45766-atomic_t fscache_n_relinquishes_waitcrt;
45767-atomic_t fscache_n_relinquishes_retire;
45768+atomic_unchecked_t fscache_n_relinquishes;
45769+atomic_unchecked_t fscache_n_relinquishes_null;
45770+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45771+atomic_unchecked_t fscache_n_relinquishes_retire;
45772
45773-atomic_t fscache_n_cookie_index;
45774-atomic_t fscache_n_cookie_data;
45775-atomic_t fscache_n_cookie_special;
45776+atomic_unchecked_t fscache_n_cookie_index;
45777+atomic_unchecked_t fscache_n_cookie_data;
45778+atomic_unchecked_t fscache_n_cookie_special;
45779
45780-atomic_t fscache_n_object_alloc;
45781-atomic_t fscache_n_object_no_alloc;
45782-atomic_t fscache_n_object_lookups;
45783-atomic_t fscache_n_object_lookups_negative;
45784-atomic_t fscache_n_object_lookups_positive;
45785-atomic_t fscache_n_object_lookups_timed_out;
45786-atomic_t fscache_n_object_created;
45787-atomic_t fscache_n_object_avail;
45788-atomic_t fscache_n_object_dead;
45789+atomic_unchecked_t fscache_n_object_alloc;
45790+atomic_unchecked_t fscache_n_object_no_alloc;
45791+atomic_unchecked_t fscache_n_object_lookups;
45792+atomic_unchecked_t fscache_n_object_lookups_negative;
45793+atomic_unchecked_t fscache_n_object_lookups_positive;
45794+atomic_unchecked_t fscache_n_object_lookups_timed_out;
45795+atomic_unchecked_t fscache_n_object_created;
45796+atomic_unchecked_t fscache_n_object_avail;
45797+atomic_unchecked_t fscache_n_object_dead;
45798
45799-atomic_t fscache_n_checkaux_none;
45800-atomic_t fscache_n_checkaux_okay;
45801-atomic_t fscache_n_checkaux_update;
45802-atomic_t fscache_n_checkaux_obsolete;
45803+atomic_unchecked_t fscache_n_checkaux_none;
45804+atomic_unchecked_t fscache_n_checkaux_okay;
45805+atomic_unchecked_t fscache_n_checkaux_update;
45806+atomic_unchecked_t fscache_n_checkaux_obsolete;
45807
45808 atomic_t fscache_n_cop_alloc_object;
45809 atomic_t fscache_n_cop_lookup_object;
45810@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
45811 seq_puts(m, "FS-Cache statistics\n");
45812
45813 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
45814- atomic_read(&fscache_n_cookie_index),
45815- atomic_read(&fscache_n_cookie_data),
45816- atomic_read(&fscache_n_cookie_special));
45817+ atomic_read_unchecked(&fscache_n_cookie_index),
45818+ atomic_read_unchecked(&fscache_n_cookie_data),
45819+ atomic_read_unchecked(&fscache_n_cookie_special));
45820
45821 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
45822- atomic_read(&fscache_n_object_alloc),
45823- atomic_read(&fscache_n_object_no_alloc),
45824- atomic_read(&fscache_n_object_avail),
45825- atomic_read(&fscache_n_object_dead));
45826+ atomic_read_unchecked(&fscache_n_object_alloc),
45827+ atomic_read_unchecked(&fscache_n_object_no_alloc),
45828+ atomic_read_unchecked(&fscache_n_object_avail),
45829+ atomic_read_unchecked(&fscache_n_object_dead));
45830 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
45831- atomic_read(&fscache_n_checkaux_none),
45832- atomic_read(&fscache_n_checkaux_okay),
45833- atomic_read(&fscache_n_checkaux_update),
45834- atomic_read(&fscache_n_checkaux_obsolete));
45835+ atomic_read_unchecked(&fscache_n_checkaux_none),
45836+ atomic_read_unchecked(&fscache_n_checkaux_okay),
45837+ atomic_read_unchecked(&fscache_n_checkaux_update),
45838+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
45839
45840 seq_printf(m, "Pages : mrk=%u unc=%u\n",
45841- atomic_read(&fscache_n_marks),
45842- atomic_read(&fscache_n_uncaches));
45843+ atomic_read_unchecked(&fscache_n_marks),
45844+ atomic_read_unchecked(&fscache_n_uncaches));
45845
45846 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
45847 " oom=%u\n",
45848- atomic_read(&fscache_n_acquires),
45849- atomic_read(&fscache_n_acquires_null),
45850- atomic_read(&fscache_n_acquires_no_cache),
45851- atomic_read(&fscache_n_acquires_ok),
45852- atomic_read(&fscache_n_acquires_nobufs),
45853- atomic_read(&fscache_n_acquires_oom));
45854+ atomic_read_unchecked(&fscache_n_acquires),
45855+ atomic_read_unchecked(&fscache_n_acquires_null),
45856+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
45857+ atomic_read_unchecked(&fscache_n_acquires_ok),
45858+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
45859+ atomic_read_unchecked(&fscache_n_acquires_oom));
45860
45861 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
45862- atomic_read(&fscache_n_object_lookups),
45863- atomic_read(&fscache_n_object_lookups_negative),
45864- atomic_read(&fscache_n_object_lookups_positive),
45865- atomic_read(&fscache_n_object_created),
45866- atomic_read(&fscache_n_object_lookups_timed_out));
45867+ atomic_read_unchecked(&fscache_n_object_lookups),
45868+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
45869+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
45870+ atomic_read_unchecked(&fscache_n_object_created),
45871+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
45872
45873 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
45874- atomic_read(&fscache_n_updates),
45875- atomic_read(&fscache_n_updates_null),
45876- atomic_read(&fscache_n_updates_run));
45877+ atomic_read_unchecked(&fscache_n_updates),
45878+ atomic_read_unchecked(&fscache_n_updates_null),
45879+ atomic_read_unchecked(&fscache_n_updates_run));
45880
45881 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
45882- atomic_read(&fscache_n_relinquishes),
45883- atomic_read(&fscache_n_relinquishes_null),
45884- atomic_read(&fscache_n_relinquishes_waitcrt),
45885- atomic_read(&fscache_n_relinquishes_retire));
45886+ atomic_read_unchecked(&fscache_n_relinquishes),
45887+ atomic_read_unchecked(&fscache_n_relinquishes_null),
45888+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
45889+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
45890
45891 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
45892- atomic_read(&fscache_n_attr_changed),
45893- atomic_read(&fscache_n_attr_changed_ok),
45894- atomic_read(&fscache_n_attr_changed_nobufs),
45895- atomic_read(&fscache_n_attr_changed_nomem),
45896- atomic_read(&fscache_n_attr_changed_calls));
45897+ atomic_read_unchecked(&fscache_n_attr_changed),
45898+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
45899+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
45900+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
45901+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
45902
45903 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
45904- atomic_read(&fscache_n_allocs),
45905- atomic_read(&fscache_n_allocs_ok),
45906- atomic_read(&fscache_n_allocs_wait),
45907- atomic_read(&fscache_n_allocs_nobufs),
45908- atomic_read(&fscache_n_allocs_intr));
45909+ atomic_read_unchecked(&fscache_n_allocs),
45910+ atomic_read_unchecked(&fscache_n_allocs_ok),
45911+ atomic_read_unchecked(&fscache_n_allocs_wait),
45912+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
45913+ atomic_read_unchecked(&fscache_n_allocs_intr));
45914 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
45915- atomic_read(&fscache_n_alloc_ops),
45916- atomic_read(&fscache_n_alloc_op_waits),
45917- atomic_read(&fscache_n_allocs_object_dead));
45918+ atomic_read_unchecked(&fscache_n_alloc_ops),
45919+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
45920+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
45921
45922 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
45923 " int=%u oom=%u\n",
45924- atomic_read(&fscache_n_retrievals),
45925- atomic_read(&fscache_n_retrievals_ok),
45926- atomic_read(&fscache_n_retrievals_wait),
45927- atomic_read(&fscache_n_retrievals_nodata),
45928- atomic_read(&fscache_n_retrievals_nobufs),
45929- atomic_read(&fscache_n_retrievals_intr),
45930- atomic_read(&fscache_n_retrievals_nomem));
45931+ atomic_read_unchecked(&fscache_n_retrievals),
45932+ atomic_read_unchecked(&fscache_n_retrievals_ok),
45933+ atomic_read_unchecked(&fscache_n_retrievals_wait),
45934+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
45935+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
45936+ atomic_read_unchecked(&fscache_n_retrievals_intr),
45937+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
45938 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
45939- atomic_read(&fscache_n_retrieval_ops),
45940- atomic_read(&fscache_n_retrieval_op_waits),
45941- atomic_read(&fscache_n_retrievals_object_dead));
45942+ atomic_read_unchecked(&fscache_n_retrieval_ops),
45943+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
45944+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
45945
45946 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
45947- atomic_read(&fscache_n_stores),
45948- atomic_read(&fscache_n_stores_ok),
45949- atomic_read(&fscache_n_stores_again),
45950- atomic_read(&fscache_n_stores_nobufs),
45951- atomic_read(&fscache_n_stores_oom));
45952+ atomic_read_unchecked(&fscache_n_stores),
45953+ atomic_read_unchecked(&fscache_n_stores_ok),
45954+ atomic_read_unchecked(&fscache_n_stores_again),
45955+ atomic_read_unchecked(&fscache_n_stores_nobufs),
45956+ atomic_read_unchecked(&fscache_n_stores_oom));
45957 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
45958- atomic_read(&fscache_n_store_ops),
45959- atomic_read(&fscache_n_store_calls),
45960- atomic_read(&fscache_n_store_pages),
45961- atomic_read(&fscache_n_store_radix_deletes),
45962- atomic_read(&fscache_n_store_pages_over_limit));
45963+ atomic_read_unchecked(&fscache_n_store_ops),
45964+ atomic_read_unchecked(&fscache_n_store_calls),
45965+ atomic_read_unchecked(&fscache_n_store_pages),
45966+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
45967+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
45968
45969 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
45970- atomic_read(&fscache_n_store_vmscan_not_storing),
45971- atomic_read(&fscache_n_store_vmscan_gone),
45972- atomic_read(&fscache_n_store_vmscan_busy),
45973- atomic_read(&fscache_n_store_vmscan_cancelled));
45974+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
45975+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
45976+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
45977+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
45978
45979 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
45980- atomic_read(&fscache_n_op_pend),
45981- atomic_read(&fscache_n_op_run),
45982- atomic_read(&fscache_n_op_enqueue),
45983- atomic_read(&fscache_n_op_cancelled),
45984- atomic_read(&fscache_n_op_rejected));
45985+ atomic_read_unchecked(&fscache_n_op_pend),
45986+ atomic_read_unchecked(&fscache_n_op_run),
45987+ atomic_read_unchecked(&fscache_n_op_enqueue),
45988+ atomic_read_unchecked(&fscache_n_op_cancelled),
45989+ atomic_read_unchecked(&fscache_n_op_rejected));
45990 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
45991- atomic_read(&fscache_n_op_deferred_release),
45992- atomic_read(&fscache_n_op_release),
45993- atomic_read(&fscache_n_op_gc));
45994+ atomic_read_unchecked(&fscache_n_op_deferred_release),
45995+ atomic_read_unchecked(&fscache_n_op_release),
45996+ atomic_read_unchecked(&fscache_n_op_gc));
45997
45998 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
45999 atomic_read(&fscache_n_cop_alloc_object),
46000diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
46001index 3426521..3b75162 100644
46002--- a/fs/fuse/cuse.c
46003+++ b/fs/fuse/cuse.c
46004@@ -587,10 +587,12 @@ static int __init cuse_init(void)
46005 INIT_LIST_HEAD(&cuse_conntbl[i]);
46006
46007 /* inherit and extend fuse_dev_operations */
46008- cuse_channel_fops = fuse_dev_operations;
46009- cuse_channel_fops.owner = THIS_MODULE;
46010- cuse_channel_fops.open = cuse_channel_open;
46011- cuse_channel_fops.release = cuse_channel_release;
46012+ pax_open_kernel();
46013+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
46014+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
46015+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
46016+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
46017+ pax_close_kernel();
46018
46019 cuse_class = class_create(THIS_MODULE, "cuse");
46020 if (IS_ERR(cuse_class))
46021diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
46022index 7df2b5e..5804aa7 100644
46023--- a/fs/fuse/dev.c
46024+++ b/fs/fuse/dev.c
46025@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
46026 ret = 0;
46027 pipe_lock(pipe);
46028
46029- if (!pipe->readers) {
46030+ if (!atomic_read(&pipe->readers)) {
46031 send_sig(SIGPIPE, current, 0);
46032 if (!ret)
46033 ret = -EPIPE;
46034diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
46035index bc43832..0cfe5a6 100644
46036--- a/fs/fuse/dir.c
46037+++ b/fs/fuse/dir.c
46038@@ -1181,7 +1181,7 @@ static char *read_link(struct dentry *dentry)
46039 return link;
46040 }
46041
46042-static void free_link(char *link)
46043+static void free_link(const char *link)
46044 {
46045 if (!IS_ERR(link))
46046 free_page((unsigned long) link);
46047diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
46048index a9ba244..d9df391 100644
46049--- a/fs/gfs2/inode.c
46050+++ b/fs/gfs2/inode.c
46051@@ -1496,7 +1496,7 @@ out:
46052
46053 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46054 {
46055- char *s = nd_get_link(nd);
46056+ const char *s = nd_get_link(nd);
46057 if (!IS_ERR(s))
46058 kfree(s);
46059 }
46060diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
46061index 001ef01..f7d5f07 100644
46062--- a/fs/hugetlbfs/inode.c
46063+++ b/fs/hugetlbfs/inode.c
46064@@ -920,7 +920,7 @@ static struct file_system_type hugetlbfs_fs_type = {
46065 .kill_sb = kill_litter_super,
46066 };
46067
46068-static struct vfsmount *hugetlbfs_vfsmount;
46069+struct vfsmount *hugetlbfs_vfsmount;
46070
46071 static int can_do_hugetlb_shm(void)
46072 {
46073diff --git a/fs/inode.c b/fs/inode.c
46074index 9f4f5fe..6214688 100644
46075--- a/fs/inode.c
46076+++ b/fs/inode.c
46077@@ -860,8 +860,8 @@ unsigned int get_next_ino(void)
46078
46079 #ifdef CONFIG_SMP
46080 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
46081- static atomic_t shared_last_ino;
46082- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
46083+ static atomic_unchecked_t shared_last_ino;
46084+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
46085
46086 res = next - LAST_INO_BATCH;
46087 }
46088diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
46089index 4a6cf28..d3a29d3 100644
46090--- a/fs/jffs2/erase.c
46091+++ b/fs/jffs2/erase.c
46092@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
46093 struct jffs2_unknown_node marker = {
46094 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
46095 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46096- .totlen = cpu_to_je32(c->cleanmarker_size)
46097+ .totlen = cpu_to_je32(c->cleanmarker_size),
46098+ .hdr_crc = cpu_to_je32(0)
46099 };
46100
46101 jffs2_prealloc_raw_node_refs(c, jeb, 1);
46102diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
46103index 74d9be1..d5dd140 100644
46104--- a/fs/jffs2/wbuf.c
46105+++ b/fs/jffs2/wbuf.c
46106@@ -1022,7 +1022,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
46107 {
46108 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
46109 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46110- .totlen = constant_cpu_to_je32(8)
46111+ .totlen = constant_cpu_to_je32(8),
46112+ .hdr_crc = constant_cpu_to_je32(0)
46113 };
46114
46115 /*
46116diff --git a/fs/jfs/super.c b/fs/jfs/super.c
46117index 4a82950..bcaa0cb 100644
46118--- a/fs/jfs/super.c
46119+++ b/fs/jfs/super.c
46120@@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
46121
46122 jfs_inode_cachep =
46123 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
46124- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
46125+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
46126 init_once);
46127 if (jfs_inode_cachep == NULL)
46128 return -ENOMEM;
46129diff --git a/fs/libfs.c b/fs/libfs.c
46130index 18d08f5..fe3dc64 100644
46131--- a/fs/libfs.c
46132+++ b/fs/libfs.c
46133@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46134
46135 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
46136 struct dentry *next;
46137+ char d_name[sizeof(next->d_iname)];
46138+ const unsigned char *name;
46139+
46140 next = list_entry(p, struct dentry, d_u.d_child);
46141 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
46142 if (!simple_positive(next)) {
46143@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46144
46145 spin_unlock(&next->d_lock);
46146 spin_unlock(&dentry->d_lock);
46147- if (filldir(dirent, next->d_name.name,
46148+ name = next->d_name.name;
46149+ if (name == next->d_iname) {
46150+ memcpy(d_name, name, next->d_name.len);
46151+ name = d_name;
46152+ }
46153+ if (filldir(dirent, name,
46154 next->d_name.len, filp->f_pos,
46155 next->d_inode->i_ino,
46156 dt_type(next->d_inode)) < 0)
46157diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
46158index 8392cb8..80d6193 100644
46159--- a/fs/lockd/clntproc.c
46160+++ b/fs/lockd/clntproc.c
46161@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
46162 /*
46163 * Cookie counter for NLM requests
46164 */
46165-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
46166+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
46167
46168 void nlmclnt_next_cookie(struct nlm_cookie *c)
46169 {
46170- u32 cookie = atomic_inc_return(&nlm_cookie);
46171+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
46172
46173 memcpy(c->data, &cookie, 4);
46174 c->len=4;
46175diff --git a/fs/locks.c b/fs/locks.c
46176index 0d68f1f..f216b79 100644
46177--- a/fs/locks.c
46178+++ b/fs/locks.c
46179@@ -2075,16 +2075,16 @@ void locks_remove_flock(struct file *filp)
46180 return;
46181
46182 if (filp->f_op && filp->f_op->flock) {
46183- struct file_lock fl = {
46184+ struct file_lock flock = {
46185 .fl_pid = current->tgid,
46186 .fl_file = filp,
46187 .fl_flags = FL_FLOCK,
46188 .fl_type = F_UNLCK,
46189 .fl_end = OFFSET_MAX,
46190 };
46191- filp->f_op->flock(filp, F_SETLKW, &fl);
46192- if (fl.fl_ops && fl.fl_ops->fl_release_private)
46193- fl.fl_ops->fl_release_private(&fl);
46194+ filp->f_op->flock(filp, F_SETLKW, &flock);
46195+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
46196+ flock.fl_ops->fl_release_private(&flock);
46197 }
46198
46199 lock_flocks();
46200diff --git a/fs/namei.c b/fs/namei.c
46201index c427919..e37fd3f 100644
46202--- a/fs/namei.c
46203+++ b/fs/namei.c
46204@@ -278,16 +278,32 @@ int generic_permission(struct inode *inode, int mask)
46205 if (ret != -EACCES)
46206 return ret;
46207
46208+#ifdef CONFIG_GRKERNSEC
46209+ /* we'll block if we have to log due to a denied capability use */
46210+ if (mask & MAY_NOT_BLOCK)
46211+ return -ECHILD;
46212+#endif
46213+
46214 if (S_ISDIR(inode->i_mode)) {
46215 /* DACs are overridable for directories */
46216- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46217- return 0;
46218 if (!(mask & MAY_WRITE))
46219- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46220+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46221+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46222 return 0;
46223+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46224+ return 0;
46225 return -EACCES;
46226 }
46227 /*
46228+ * Searching includes executable on directories, else just read.
46229+ */
46230+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46231+ if (mask == MAY_READ)
46232+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46233+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46234+ return 0;
46235+
46236+ /*
46237 * Read/write DACs are always overridable.
46238 * Executable DACs are overridable when there is
46239 * at least one exec bit set.
46240@@ -296,14 +312,6 @@ int generic_permission(struct inode *inode, int mask)
46241 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46242 return 0;
46243
46244- /*
46245- * Searching includes executable on directories, else just read.
46246- */
46247- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46248- if (mask == MAY_READ)
46249- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46250- return 0;
46251-
46252 return -EACCES;
46253 }
46254
46255@@ -652,11 +660,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
46256 return error;
46257 }
46258
46259+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
46260+ dentry->d_inode, dentry, nd->path.mnt)) {
46261+ error = -EACCES;
46262+ *p = ERR_PTR(error); /* no ->put_link(), please */
46263+ path_put(&nd->path);
46264+ return error;
46265+ }
46266+
46267 nd->last_type = LAST_BIND;
46268 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
46269 error = PTR_ERR(*p);
46270 if (!IS_ERR(*p)) {
46271- char *s = nd_get_link(nd);
46272+ const char *s = nd_get_link(nd);
46273 error = 0;
46274 if (s)
46275 error = __vfs_follow_link(nd, s);
46276@@ -1753,6 +1769,21 @@ static int path_lookupat(int dfd, const char *name,
46277 if (!err)
46278 err = complete_walk(nd);
46279
46280+ if (!(nd->flags & LOOKUP_PARENT)) {
46281+#ifdef CONFIG_GRKERNSEC
46282+ if (flags & LOOKUP_RCU) {
46283+ if (!err)
46284+ path_put(&nd->path);
46285+ err = -ECHILD;
46286+ } else
46287+#endif
46288+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46289+ if (!err)
46290+ path_put(&nd->path);
46291+ err = -ENOENT;
46292+ }
46293+ }
46294+
46295 if (!err && nd->flags & LOOKUP_DIRECTORY) {
46296 if (!nd->inode->i_op->lookup) {
46297 path_put(&nd->path);
46298@@ -1780,6 +1811,15 @@ static int do_path_lookup(int dfd, const char *name,
46299 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
46300
46301 if (likely(!retval)) {
46302+ if (*name != '/' && nd->path.dentry && nd->inode) {
46303+#ifdef CONFIG_GRKERNSEC
46304+ if (flags & LOOKUP_RCU)
46305+ return -ECHILD;
46306+#endif
46307+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
46308+ return -ENOENT;
46309+ }
46310+
46311 if (unlikely(!audit_dummy_context())) {
46312 if (nd->path.dentry && nd->inode)
46313 audit_inode(name, nd->path.dentry);
46314@@ -2126,6 +2166,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
46315 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
46316 return -EPERM;
46317
46318+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
46319+ return -EPERM;
46320+ if (gr_handle_rawio(inode))
46321+ return -EPERM;
46322+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
46323+ return -EACCES;
46324+
46325 return 0;
46326 }
46327
46328@@ -2187,6 +2234,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46329 error = complete_walk(nd);
46330 if (error)
46331 return ERR_PTR(error);
46332+#ifdef CONFIG_GRKERNSEC
46333+ if (nd->flags & LOOKUP_RCU) {
46334+ error = -ECHILD;
46335+ goto exit;
46336+ }
46337+#endif
46338+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46339+ error = -ENOENT;
46340+ goto exit;
46341+ }
46342 audit_inode(pathname, nd->path.dentry);
46343 if (open_flag & O_CREAT) {
46344 error = -EISDIR;
46345@@ -2197,6 +2254,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46346 error = complete_walk(nd);
46347 if (error)
46348 return ERR_PTR(error);
46349+#ifdef CONFIG_GRKERNSEC
46350+ if (nd->flags & LOOKUP_RCU) {
46351+ error = -ECHILD;
46352+ goto exit;
46353+ }
46354+#endif
46355+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
46356+ error = -ENOENT;
46357+ goto exit;
46358+ }
46359 audit_inode(pathname, dir);
46360 goto ok;
46361 }
46362@@ -2218,6 +2285,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46363 error = complete_walk(nd);
46364 if (error)
46365 return ERR_PTR(error);
46366+#ifdef CONFIG_GRKERNSEC
46367+ if (nd->flags & LOOKUP_RCU) {
46368+ error = -ECHILD;
46369+ goto exit;
46370+ }
46371+#endif
46372+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46373+ error = -ENOENT;
46374+ goto exit;
46375+ }
46376
46377 error = -ENOTDIR;
46378 if (nd->flags & LOOKUP_DIRECTORY) {
46379@@ -2258,6 +2335,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46380 /* Negative dentry, just create the file */
46381 if (!dentry->d_inode) {
46382 umode_t mode = op->mode;
46383+
46384+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
46385+ error = -EACCES;
46386+ goto exit_mutex_unlock;
46387+ }
46388+
46389 if (!IS_POSIXACL(dir->d_inode))
46390 mode &= ~current_umask();
46391 /*
46392@@ -2281,6 +2364,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46393 error = vfs_create(dir->d_inode, dentry, mode, nd);
46394 if (error)
46395 goto exit_mutex_unlock;
46396+ else
46397+ gr_handle_create(path->dentry, path->mnt);
46398 mutex_unlock(&dir->d_inode->i_mutex);
46399 dput(nd->path.dentry);
46400 nd->path.dentry = dentry;
46401@@ -2290,6 +2375,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46402 /*
46403 * It already exists.
46404 */
46405+
46406+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
46407+ error = -ENOENT;
46408+ goto exit_mutex_unlock;
46409+ }
46410+
46411+ /* only check if O_CREAT is specified, all other checks need to go
46412+ into may_open */
46413+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
46414+ error = -EACCES;
46415+ goto exit_mutex_unlock;
46416+ }
46417+
46418 mutex_unlock(&dir->d_inode->i_mutex);
46419 audit_inode(pathname, path->dentry);
46420
46421@@ -2502,6 +2600,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
46422 *path = nd.path;
46423 return dentry;
46424 eexist:
46425+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
46426+ dput(dentry);
46427+ dentry = ERR_PTR(-ENOENT);
46428+ goto fail;
46429+ }
46430 dput(dentry);
46431 dentry = ERR_PTR(-EEXIST);
46432 fail:
46433@@ -2524,6 +2627,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
46434 }
46435 EXPORT_SYMBOL(user_path_create);
46436
46437+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
46438+{
46439+ char *tmp = getname(pathname);
46440+ struct dentry *res;
46441+ if (IS_ERR(tmp))
46442+ return ERR_CAST(tmp);
46443+ res = kern_path_create(dfd, tmp, path, is_dir);
46444+ if (IS_ERR(res))
46445+ putname(tmp);
46446+ else
46447+ *to = tmp;
46448+ return res;
46449+}
46450+
46451 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
46452 {
46453 int error = may_create(dir, dentry);
46454@@ -2591,6 +2708,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46455 error = mnt_want_write(path.mnt);
46456 if (error)
46457 goto out_dput;
46458+
46459+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
46460+ error = -EPERM;
46461+ goto out_drop_write;
46462+ }
46463+
46464+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
46465+ error = -EACCES;
46466+ goto out_drop_write;
46467+ }
46468+
46469 error = security_path_mknod(&path, dentry, mode, dev);
46470 if (error)
46471 goto out_drop_write;
46472@@ -2608,6 +2736,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46473 }
46474 out_drop_write:
46475 mnt_drop_write(path.mnt);
46476+
46477+ if (!error)
46478+ gr_handle_create(dentry, path.mnt);
46479 out_dput:
46480 dput(dentry);
46481 mutex_unlock(&path.dentry->d_inode->i_mutex);
46482@@ -2661,12 +2792,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
46483 error = mnt_want_write(path.mnt);
46484 if (error)
46485 goto out_dput;
46486+
46487+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
46488+ error = -EACCES;
46489+ goto out_drop_write;
46490+ }
46491+
46492 error = security_path_mkdir(&path, dentry, mode);
46493 if (error)
46494 goto out_drop_write;
46495 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
46496 out_drop_write:
46497 mnt_drop_write(path.mnt);
46498+
46499+ if (!error)
46500+ gr_handle_create(dentry, path.mnt);
46501 out_dput:
46502 dput(dentry);
46503 mutex_unlock(&path.dentry->d_inode->i_mutex);
46504@@ -2746,6 +2886,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46505 char * name;
46506 struct dentry *dentry;
46507 struct nameidata nd;
46508+ ino_t saved_ino = 0;
46509+ dev_t saved_dev = 0;
46510
46511 error = user_path_parent(dfd, pathname, &nd, &name);
46512 if (error)
46513@@ -2774,6 +2916,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
46514 error = -ENOENT;
46515 goto exit3;
46516 }
46517+
46518+ saved_ino = dentry->d_inode->i_ino;
46519+ saved_dev = gr_get_dev_from_dentry(dentry);
46520+
46521+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
46522+ error = -EACCES;
46523+ goto exit3;
46524+ }
46525+
46526 error = mnt_want_write(nd.path.mnt);
46527 if (error)
46528 goto exit3;
46529@@ -2781,6 +2932,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46530 if (error)
46531 goto exit4;
46532 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
46533+ if (!error && (saved_dev || saved_ino))
46534+ gr_handle_delete(saved_ino, saved_dev);
46535 exit4:
46536 mnt_drop_write(nd.path.mnt);
46537 exit3:
46538@@ -2843,6 +2996,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46539 struct dentry *dentry;
46540 struct nameidata nd;
46541 struct inode *inode = NULL;
46542+ ino_t saved_ino = 0;
46543+ dev_t saved_dev = 0;
46544
46545 error = user_path_parent(dfd, pathname, &nd, &name);
46546 if (error)
46547@@ -2865,6 +3020,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46548 if (!inode)
46549 goto slashes;
46550 ihold(inode);
46551+
46552+ if (inode->i_nlink <= 1) {
46553+ saved_ino = inode->i_ino;
46554+ saved_dev = gr_get_dev_from_dentry(dentry);
46555+ }
46556+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
46557+ error = -EACCES;
46558+ goto exit2;
46559+ }
46560+
46561 error = mnt_want_write(nd.path.mnt);
46562 if (error)
46563 goto exit2;
46564@@ -2872,6 +3037,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46565 if (error)
46566 goto exit3;
46567 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
46568+ if (!error && (saved_ino || saved_dev))
46569+ gr_handle_delete(saved_ino, saved_dev);
46570 exit3:
46571 mnt_drop_write(nd.path.mnt);
46572 exit2:
46573@@ -2947,10 +3114,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
46574 error = mnt_want_write(path.mnt);
46575 if (error)
46576 goto out_dput;
46577+
46578+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
46579+ error = -EACCES;
46580+ goto out_drop_write;
46581+ }
46582+
46583 error = security_path_symlink(&path, dentry, from);
46584 if (error)
46585 goto out_drop_write;
46586 error = vfs_symlink(path.dentry->d_inode, dentry, from);
46587+ if (!error)
46588+ gr_handle_create(dentry, path.mnt);
46589 out_drop_write:
46590 mnt_drop_write(path.mnt);
46591 out_dput:
46592@@ -3025,6 +3200,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46593 {
46594 struct dentry *new_dentry;
46595 struct path old_path, new_path;
46596+ char *to = NULL;
46597 int how = 0;
46598 int error;
46599
46600@@ -3048,7 +3224,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46601 if (error)
46602 return error;
46603
46604- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
46605+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
46606 error = PTR_ERR(new_dentry);
46607 if (IS_ERR(new_dentry))
46608 goto out;
46609@@ -3059,13 +3235,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46610 error = mnt_want_write(new_path.mnt);
46611 if (error)
46612 goto out_dput;
46613+
46614+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
46615+ old_path.dentry->d_inode,
46616+ old_path.dentry->d_inode->i_mode, to)) {
46617+ error = -EACCES;
46618+ goto out_drop_write;
46619+ }
46620+
46621+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
46622+ old_path.dentry, old_path.mnt, to)) {
46623+ error = -EACCES;
46624+ goto out_drop_write;
46625+ }
46626+
46627 error = security_path_link(old_path.dentry, &new_path, new_dentry);
46628 if (error)
46629 goto out_drop_write;
46630 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
46631+ if (!error)
46632+ gr_handle_create(new_dentry, new_path.mnt);
46633 out_drop_write:
46634 mnt_drop_write(new_path.mnt);
46635 out_dput:
46636+ putname(to);
46637 dput(new_dentry);
46638 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
46639 path_put(&new_path);
46640@@ -3299,6 +3492,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46641 if (new_dentry == trap)
46642 goto exit5;
46643
46644+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
46645+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
46646+ to);
46647+ if (error)
46648+ goto exit5;
46649+
46650 error = mnt_want_write(oldnd.path.mnt);
46651 if (error)
46652 goto exit5;
46653@@ -3308,6 +3507,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46654 goto exit6;
46655 error = vfs_rename(old_dir->d_inode, old_dentry,
46656 new_dir->d_inode, new_dentry);
46657+ if (!error)
46658+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
46659+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
46660 exit6:
46661 mnt_drop_write(oldnd.path.mnt);
46662 exit5:
46663@@ -3333,6 +3535,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
46664
46665 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
46666 {
46667+ char tmpbuf[64];
46668+ const char *newlink;
46669 int len;
46670
46671 len = PTR_ERR(link);
46672@@ -3342,7 +3546,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
46673 len = strlen(link);
46674 if (len > (unsigned) buflen)
46675 len = buflen;
46676- if (copy_to_user(buffer, link, len))
46677+
46678+ if (len < sizeof(tmpbuf)) {
46679+ memcpy(tmpbuf, link, len);
46680+ newlink = tmpbuf;
46681+ } else
46682+ newlink = link;
46683+
46684+ if (copy_to_user(buffer, newlink, len))
46685 len = -EFAULT;
46686 out:
46687 return len;
46688diff --git a/fs/namespace.c b/fs/namespace.c
46689index 4e46539..b28253c 100644
46690--- a/fs/namespace.c
46691+++ b/fs/namespace.c
46692@@ -1156,6 +1156,9 @@ static int do_umount(struct mount *mnt, int flags)
46693 if (!(sb->s_flags & MS_RDONLY))
46694 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
46695 up_write(&sb->s_umount);
46696+
46697+ gr_log_remount(mnt->mnt_devname, retval);
46698+
46699 return retval;
46700 }
46701
46702@@ -1175,6 +1178,9 @@ static int do_umount(struct mount *mnt, int flags)
46703 br_write_unlock(vfsmount_lock);
46704 up_write(&namespace_sem);
46705 release_mounts(&umount_list);
46706+
46707+ gr_log_unmount(mnt->mnt_devname, retval);
46708+
46709 return retval;
46710 }
46711
46712@@ -2176,6 +2182,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
46713 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
46714 MS_STRICTATIME);
46715
46716+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
46717+ retval = -EPERM;
46718+ goto dput_out;
46719+ }
46720+
46721+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
46722+ retval = -EPERM;
46723+ goto dput_out;
46724+ }
46725+
46726 if (flags & MS_REMOUNT)
46727 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
46728 data_page);
46729@@ -2190,6 +2206,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
46730 dev_name, data_page);
46731 dput_out:
46732 path_put(&path);
46733+
46734+ gr_log_mount(dev_name, dir_name, retval);
46735+
46736 return retval;
46737 }
46738
46739@@ -2471,6 +2490,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
46740 if (error)
46741 goto out2;
46742
46743+ if (gr_handle_chroot_pivot()) {
46744+ error = -EPERM;
46745+ goto out2;
46746+ }
46747+
46748 get_fs_root(current->fs, &root);
46749 error = lock_mount(&old);
46750 if (error)
46751diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
46752index e8bbfa5..864f936 100644
46753--- a/fs/nfs/inode.c
46754+++ b/fs/nfs/inode.c
46755@@ -152,7 +152,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
46756 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
46757 nfsi->attrtimeo_timestamp = jiffies;
46758
46759- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
46760+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
46761 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
46762 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
46763 else
46764@@ -1005,16 +1005,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
46765 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
46766 }
46767
46768-static atomic_long_t nfs_attr_generation_counter;
46769+static atomic_long_unchecked_t nfs_attr_generation_counter;
46770
46771 static unsigned long nfs_read_attr_generation_counter(void)
46772 {
46773- return atomic_long_read(&nfs_attr_generation_counter);
46774+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
46775 }
46776
46777 unsigned long nfs_inc_attr_generation_counter(void)
46778 {
46779- return atomic_long_inc_return(&nfs_attr_generation_counter);
46780+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
46781 }
46782
46783 void nfs_fattr_init(struct nfs_fattr *fattr)
46784diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
46785index 5686661..80a9a3a 100644
46786--- a/fs/nfsd/vfs.c
46787+++ b/fs/nfsd/vfs.c
46788@@ -933,7 +933,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
46789 } else {
46790 oldfs = get_fs();
46791 set_fs(KERNEL_DS);
46792- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
46793+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
46794 set_fs(oldfs);
46795 }
46796
46797@@ -1037,7 +1037,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
46798
46799 /* Write the data. */
46800 oldfs = get_fs(); set_fs(KERNEL_DS);
46801- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
46802+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
46803 set_fs(oldfs);
46804 if (host_err < 0)
46805 goto out_nfserr;
46806@@ -1573,7 +1573,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
46807 */
46808
46809 oldfs = get_fs(); set_fs(KERNEL_DS);
46810- host_err = inode->i_op->readlink(path.dentry, buf, *lenp);
46811+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
46812 set_fs(oldfs);
46813
46814 if (host_err < 0)
46815diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
46816index 3568c8a..e0240d8 100644
46817--- a/fs/notify/fanotify/fanotify_user.c
46818+++ b/fs/notify/fanotify/fanotify_user.c
46819@@ -278,7 +278,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
46820 goto out_close_fd;
46821
46822 ret = -EFAULT;
46823- if (copy_to_user(buf, &fanotify_event_metadata,
46824+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
46825+ copy_to_user(buf, &fanotify_event_metadata,
46826 fanotify_event_metadata.event_len))
46827 goto out_kill_access_response;
46828
46829diff --git a/fs/notify/notification.c b/fs/notify/notification.c
46830index c887b13..0fdf472 100644
46831--- a/fs/notify/notification.c
46832+++ b/fs/notify/notification.c
46833@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
46834 * get set to 0 so it will never get 'freed'
46835 */
46836 static struct fsnotify_event *q_overflow_event;
46837-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46838+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46839
46840 /**
46841 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
46842@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46843 */
46844 u32 fsnotify_get_cookie(void)
46845 {
46846- return atomic_inc_return(&fsnotify_sync_cookie);
46847+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
46848 }
46849 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
46850
46851diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
46852index 99e3610..02c1068 100644
46853--- a/fs/ntfs/dir.c
46854+++ b/fs/ntfs/dir.c
46855@@ -1329,7 +1329,7 @@ find_next_index_buffer:
46856 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
46857 ~(s64)(ndir->itype.index.block_size - 1)));
46858 /* Bounds checks. */
46859- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
46860+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
46861 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
46862 "inode 0x%lx or driver bug.", vdir->i_ino);
46863 goto err_out;
46864diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
46865index 8639169..76697aa 100644
46866--- a/fs/ntfs/file.c
46867+++ b/fs/ntfs/file.c
46868@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
46869 #endif /* NTFS_RW */
46870 };
46871
46872-const struct file_operations ntfs_empty_file_ops = {};
46873+const struct file_operations ntfs_empty_file_ops __read_only;
46874
46875-const struct inode_operations ntfs_empty_inode_ops = {};
46876+const struct inode_operations ntfs_empty_inode_ops __read_only;
46877diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
46878index 210c352..a174f83 100644
46879--- a/fs/ocfs2/localalloc.c
46880+++ b/fs/ocfs2/localalloc.c
46881@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
46882 goto bail;
46883 }
46884
46885- atomic_inc(&osb->alloc_stats.moves);
46886+ atomic_inc_unchecked(&osb->alloc_stats.moves);
46887
46888 bail:
46889 if (handle)
46890diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
46891index d355e6e..578d905 100644
46892--- a/fs/ocfs2/ocfs2.h
46893+++ b/fs/ocfs2/ocfs2.h
46894@@ -235,11 +235,11 @@ enum ocfs2_vol_state
46895
46896 struct ocfs2_alloc_stats
46897 {
46898- atomic_t moves;
46899- atomic_t local_data;
46900- atomic_t bitmap_data;
46901- atomic_t bg_allocs;
46902- atomic_t bg_extends;
46903+ atomic_unchecked_t moves;
46904+ atomic_unchecked_t local_data;
46905+ atomic_unchecked_t bitmap_data;
46906+ atomic_unchecked_t bg_allocs;
46907+ atomic_unchecked_t bg_extends;
46908 };
46909
46910 enum ocfs2_local_alloc_state
46911diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
46912index f169da4..9112253 100644
46913--- a/fs/ocfs2/suballoc.c
46914+++ b/fs/ocfs2/suballoc.c
46915@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
46916 mlog_errno(status);
46917 goto bail;
46918 }
46919- atomic_inc(&osb->alloc_stats.bg_extends);
46920+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
46921
46922 /* You should never ask for this much metadata */
46923 BUG_ON(bits_wanted >
46924@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
46925 mlog_errno(status);
46926 goto bail;
46927 }
46928- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46929+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46930
46931 *suballoc_loc = res.sr_bg_blkno;
46932 *suballoc_bit_start = res.sr_bit_offset;
46933@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
46934 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
46935 res->sr_bits);
46936
46937- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46938+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46939
46940 BUG_ON(res->sr_bits != 1);
46941
46942@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
46943 mlog_errno(status);
46944 goto bail;
46945 }
46946- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46947+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46948
46949 BUG_ON(res.sr_bits != 1);
46950
46951@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
46952 cluster_start,
46953 num_clusters);
46954 if (!status)
46955- atomic_inc(&osb->alloc_stats.local_data);
46956+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
46957 } else {
46958 if (min_clusters > (osb->bitmap_cpg - 1)) {
46959 /* The only paths asking for contiguousness
46960@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
46961 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
46962 res.sr_bg_blkno,
46963 res.sr_bit_offset);
46964- atomic_inc(&osb->alloc_stats.bitmap_data);
46965+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
46966 *num_clusters = res.sr_bits;
46967 }
46968 }
46969diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
46970index 68f4541..89cfe6a 100644
46971--- a/fs/ocfs2/super.c
46972+++ b/fs/ocfs2/super.c
46973@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
46974 "%10s => GlobalAllocs: %d LocalAllocs: %d "
46975 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
46976 "Stats",
46977- atomic_read(&osb->alloc_stats.bitmap_data),
46978- atomic_read(&osb->alloc_stats.local_data),
46979- atomic_read(&osb->alloc_stats.bg_allocs),
46980- atomic_read(&osb->alloc_stats.moves),
46981- atomic_read(&osb->alloc_stats.bg_extends));
46982+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
46983+ atomic_read_unchecked(&osb->alloc_stats.local_data),
46984+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
46985+ atomic_read_unchecked(&osb->alloc_stats.moves),
46986+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
46987
46988 out += snprintf(buf + out, len - out,
46989 "%10s => State: %u Descriptor: %llu Size: %u bits "
46990@@ -2116,11 +2116,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
46991 spin_lock_init(&osb->osb_xattr_lock);
46992 ocfs2_init_steal_slots(osb);
46993
46994- atomic_set(&osb->alloc_stats.moves, 0);
46995- atomic_set(&osb->alloc_stats.local_data, 0);
46996- atomic_set(&osb->alloc_stats.bitmap_data, 0);
46997- atomic_set(&osb->alloc_stats.bg_allocs, 0);
46998- atomic_set(&osb->alloc_stats.bg_extends, 0);
46999+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
47000+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
47001+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
47002+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
47003+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
47004
47005 /* Copy the blockcheck stats from the superblock probe */
47006 osb->osb_ecc_stats = *stats;
47007diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
47008index 5d22872..523db20 100644
47009--- a/fs/ocfs2/symlink.c
47010+++ b/fs/ocfs2/symlink.c
47011@@ -142,7 +142,7 @@ bail:
47012
47013 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47014 {
47015- char *link = nd_get_link(nd);
47016+ const char *link = nd_get_link(nd);
47017 if (!IS_ERR(link))
47018 kfree(link);
47019 }
47020diff --git a/fs/open.c b/fs/open.c
47021index 5720854..ccfe124 100644
47022--- a/fs/open.c
47023+++ b/fs/open.c
47024@@ -31,6 +31,8 @@
47025 #include <linux/ima.h>
47026 #include <linux/dnotify.h>
47027
47028+#define CREATE_TRACE_POINTS
47029+#include <trace/events/fs.h>
47030 #include "internal.h"
47031
47032 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
47033@@ -112,6 +114,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
47034 error = locks_verify_truncate(inode, NULL, length);
47035 if (!error)
47036 error = security_path_truncate(&path);
47037+
47038+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
47039+ error = -EACCES;
47040+
47041 if (!error)
47042 error = do_truncate(path.dentry, length, 0, NULL);
47043
47044@@ -358,6 +364,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
47045 if (__mnt_is_readonly(path.mnt))
47046 res = -EROFS;
47047
47048+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
47049+ res = -EACCES;
47050+
47051 out_path_release:
47052 path_put(&path);
47053 out:
47054@@ -384,6 +393,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
47055 if (error)
47056 goto dput_and_out;
47057
47058+ gr_log_chdir(path.dentry, path.mnt);
47059+
47060 set_fs_pwd(current->fs, &path);
47061
47062 dput_and_out:
47063@@ -410,6 +421,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
47064 goto out_putf;
47065
47066 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
47067+
47068+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
47069+ error = -EPERM;
47070+
47071+ if (!error)
47072+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
47073+
47074 if (!error)
47075 set_fs_pwd(current->fs, &file->f_path);
47076 out_putf:
47077@@ -438,7 +456,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
47078 if (error)
47079 goto dput_and_out;
47080
47081+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
47082+ goto dput_and_out;
47083+
47084 set_fs_root(current->fs, &path);
47085+
47086+ gr_handle_chroot_chdir(&path);
47087+
47088 error = 0;
47089 dput_and_out:
47090 path_put(&path);
47091@@ -456,6 +480,16 @@ static int chmod_common(struct path *path, umode_t mode)
47092 if (error)
47093 return error;
47094 mutex_lock(&inode->i_mutex);
47095+
47096+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
47097+ error = -EACCES;
47098+ goto out_unlock;
47099+ }
47100+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
47101+ error = -EACCES;
47102+ goto out_unlock;
47103+ }
47104+
47105 error = security_path_chmod(path, mode);
47106 if (error)
47107 goto out_unlock;
47108@@ -506,6 +540,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
47109 int error;
47110 struct iattr newattrs;
47111
47112+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
47113+ return -EACCES;
47114+
47115 newattrs.ia_valid = ATTR_CTIME;
47116 if (user != (uid_t) -1) {
47117 newattrs.ia_valid |= ATTR_UID;
47118@@ -987,6 +1024,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
47119 } else {
47120 fsnotify_open(f);
47121 fd_install(fd, f);
47122+ trace_do_sys_open(tmp, flags, mode);
47123 }
47124 }
47125 putname(tmp);
47126diff --git a/fs/pipe.c b/fs/pipe.c
47127index fec5e4a..f4210f9 100644
47128--- a/fs/pipe.c
47129+++ b/fs/pipe.c
47130@@ -438,9 +438,9 @@ redo:
47131 }
47132 if (bufs) /* More to do? */
47133 continue;
47134- if (!pipe->writers)
47135+ if (!atomic_read(&pipe->writers))
47136 break;
47137- if (!pipe->waiting_writers) {
47138+ if (!atomic_read(&pipe->waiting_writers)) {
47139 /* syscall merging: Usually we must not sleep
47140 * if O_NONBLOCK is set, or if we got some data.
47141 * But if a writer sleeps in kernel space, then
47142@@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
47143 mutex_lock(&inode->i_mutex);
47144 pipe = inode->i_pipe;
47145
47146- if (!pipe->readers) {
47147+ if (!atomic_read(&pipe->readers)) {
47148 send_sig(SIGPIPE, current, 0);
47149 ret = -EPIPE;
47150 goto out;
47151@@ -553,7 +553,7 @@ redo1:
47152 for (;;) {
47153 int bufs;
47154
47155- if (!pipe->readers) {
47156+ if (!atomic_read(&pipe->readers)) {
47157 send_sig(SIGPIPE, current, 0);
47158 if (!ret)
47159 ret = -EPIPE;
47160@@ -644,9 +644,9 @@ redo2:
47161 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
47162 do_wakeup = 0;
47163 }
47164- pipe->waiting_writers++;
47165+ atomic_inc(&pipe->waiting_writers);
47166 pipe_wait(pipe);
47167- pipe->waiting_writers--;
47168+ atomic_dec(&pipe->waiting_writers);
47169 }
47170 out:
47171 mutex_unlock(&inode->i_mutex);
47172@@ -713,7 +713,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47173 mask = 0;
47174 if (filp->f_mode & FMODE_READ) {
47175 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
47176- if (!pipe->writers && filp->f_version != pipe->w_counter)
47177+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
47178 mask |= POLLHUP;
47179 }
47180
47181@@ -723,7 +723,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47182 * Most Unices do not set POLLERR for FIFOs but on Linux they
47183 * behave exactly like pipes for poll().
47184 */
47185- if (!pipe->readers)
47186+ if (!atomic_read(&pipe->readers))
47187 mask |= POLLERR;
47188 }
47189
47190@@ -737,10 +737,10 @@ pipe_release(struct inode *inode, int decr, int decw)
47191
47192 mutex_lock(&inode->i_mutex);
47193 pipe = inode->i_pipe;
47194- pipe->readers -= decr;
47195- pipe->writers -= decw;
47196+ atomic_sub(decr, &pipe->readers);
47197+ atomic_sub(decw, &pipe->writers);
47198
47199- if (!pipe->readers && !pipe->writers) {
47200+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
47201 free_pipe_info(inode);
47202 } else {
47203 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
47204@@ -830,7 +830,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
47205
47206 if (inode->i_pipe) {
47207 ret = 0;
47208- inode->i_pipe->readers++;
47209+ atomic_inc(&inode->i_pipe->readers);
47210 }
47211
47212 mutex_unlock(&inode->i_mutex);
47213@@ -847,7 +847,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
47214
47215 if (inode->i_pipe) {
47216 ret = 0;
47217- inode->i_pipe->writers++;
47218+ atomic_inc(&inode->i_pipe->writers);
47219 }
47220
47221 mutex_unlock(&inode->i_mutex);
47222@@ -865,9 +865,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
47223 if (inode->i_pipe) {
47224 ret = 0;
47225 if (filp->f_mode & FMODE_READ)
47226- inode->i_pipe->readers++;
47227+ atomic_inc(&inode->i_pipe->readers);
47228 if (filp->f_mode & FMODE_WRITE)
47229- inode->i_pipe->writers++;
47230+ atomic_inc(&inode->i_pipe->writers);
47231 }
47232
47233 mutex_unlock(&inode->i_mutex);
47234@@ -959,7 +959,7 @@ void free_pipe_info(struct inode *inode)
47235 inode->i_pipe = NULL;
47236 }
47237
47238-static struct vfsmount *pipe_mnt __read_mostly;
47239+struct vfsmount *pipe_mnt __read_mostly;
47240
47241 /*
47242 * pipefs_dname() is called from d_path().
47243@@ -989,7 +989,8 @@ static struct inode * get_pipe_inode(void)
47244 goto fail_iput;
47245 inode->i_pipe = pipe;
47246
47247- pipe->readers = pipe->writers = 1;
47248+ atomic_set(&pipe->readers, 1);
47249+ atomic_set(&pipe->writers, 1);
47250 inode->i_fop = &rdwr_pipefifo_fops;
47251
47252 /*
47253diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
47254index 15af622..0e9f4467 100644
47255--- a/fs/proc/Kconfig
47256+++ b/fs/proc/Kconfig
47257@@ -30,12 +30,12 @@ config PROC_FS
47258
47259 config PROC_KCORE
47260 bool "/proc/kcore support" if !ARM
47261- depends on PROC_FS && MMU
47262+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
47263
47264 config PROC_VMCORE
47265 bool "/proc/vmcore support"
47266- depends on PROC_FS && CRASH_DUMP
47267- default y
47268+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
47269+ default n
47270 help
47271 Exports the dump image of crashed kernel in ELF format.
47272
47273@@ -59,8 +59,8 @@ config PROC_SYSCTL
47274 limited in memory.
47275
47276 config PROC_PAGE_MONITOR
47277- default y
47278- depends on PROC_FS && MMU
47279+ default n
47280+ depends on PROC_FS && MMU && !GRKERNSEC
47281 bool "Enable /proc page monitoring" if EXPERT
47282 help
47283 Various /proc files exist to monitor process memory utilization:
47284diff --git a/fs/proc/array.c b/fs/proc/array.c
47285index f9bd395..acb7847 100644
47286--- a/fs/proc/array.c
47287+++ b/fs/proc/array.c
47288@@ -60,6 +60,7 @@
47289 #include <linux/tty.h>
47290 #include <linux/string.h>
47291 #include <linux/mman.h>
47292+#include <linux/grsecurity.h>
47293 #include <linux/proc_fs.h>
47294 #include <linux/ioport.h>
47295 #include <linux/uaccess.h>
47296@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
47297 seq_putc(m, '\n');
47298 }
47299
47300+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47301+static inline void task_pax(struct seq_file *m, struct task_struct *p)
47302+{
47303+ if (p->mm)
47304+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
47305+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
47306+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
47307+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
47308+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
47309+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
47310+ else
47311+ seq_printf(m, "PaX:\t-----\n");
47312+}
47313+#endif
47314+
47315 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47316 struct pid *pid, struct task_struct *task)
47317 {
47318@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47319 task_cpus_allowed(m, task);
47320 cpuset_task_status_allowed(m, task);
47321 task_context_switch_counts(m, task);
47322+
47323+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47324+ task_pax(m, task);
47325+#endif
47326+
47327+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
47328+ task_grsec_rbac(m, task);
47329+#endif
47330+
47331 return 0;
47332 }
47333
47334+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47335+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47336+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
47337+ _mm->pax_flags & MF_PAX_SEGMEXEC))
47338+#endif
47339+
47340 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47341 struct pid *pid, struct task_struct *task, int whole)
47342 {
47343@@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47344 char tcomm[sizeof(task->comm)];
47345 unsigned long flags;
47346
47347+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47348+ if (current->exec_id != m->exec_id) {
47349+ gr_log_badprocpid("stat");
47350+ return 0;
47351+ }
47352+#endif
47353+
47354 state = *get_task_state(task);
47355 vsize = eip = esp = 0;
47356 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
47357@@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47358 gtime = task->gtime;
47359 }
47360
47361+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47362+ if (PAX_RAND_FLAGS(mm)) {
47363+ eip = 0;
47364+ esp = 0;
47365+ wchan = 0;
47366+ }
47367+#endif
47368+#ifdef CONFIG_GRKERNSEC_HIDESYM
47369+ wchan = 0;
47370+ eip =0;
47371+ esp =0;
47372+#endif
47373+
47374 /* scale priority and nice values from timeslices to -20..20 */
47375 /* to make it look like a "normal" Unix priority/nice value */
47376 priority = task_prio(task);
47377@@ -485,9 +536,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47378 seq_put_decimal_ull(m, ' ', vsize);
47379 seq_put_decimal_ll(m, ' ', mm ? get_mm_rss(mm) : 0);
47380 seq_put_decimal_ull(m, ' ', rsslim);
47381+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47382+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
47383+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
47384+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
47385+#else
47386 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
47387 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
47388 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
47389+#endif
47390 seq_put_decimal_ull(m, ' ', esp);
47391 seq_put_decimal_ull(m, ' ', eip);
47392 /* The signal information here is obsolete.
47393@@ -508,9 +565,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47394 seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task));
47395 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
47396 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
47397+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47398+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->start_data : 0));
47399+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->end_data : 0));
47400+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->start_brk : 0));
47401+#else
47402 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_data : 0);
47403 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->end_data : 0);
47404 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_brk : 0);
47405+#endif
47406 seq_putc(m, '\n');
47407 if (mm)
47408 mmput(mm);
47409@@ -533,8 +596,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47410 struct pid *pid, struct task_struct *task)
47411 {
47412 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
47413- struct mm_struct *mm = get_task_mm(task);
47414+ struct mm_struct *mm;
47415
47416+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47417+ if (current->exec_id != m->exec_id) {
47418+ gr_log_badprocpid("statm");
47419+ return 0;
47420+ }
47421+#endif
47422+ mm = get_task_mm(task);
47423 if (mm) {
47424 size = task_statm(mm, &shared, &text, &data, &resident);
47425 mmput(mm);
47426@@ -556,3 +626,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47427
47428 return 0;
47429 }
47430+
47431+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47432+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
47433+{
47434+ u32 curr_ip = 0;
47435+ unsigned long flags;
47436+
47437+ if (lock_task_sighand(task, &flags)) {
47438+ curr_ip = task->signal->curr_ip;
47439+ unlock_task_sighand(task, &flags);
47440+ }
47441+
47442+ return sprintf(buffer, "%pI4\n", &curr_ip);
47443+}
47444+#endif
47445diff --git a/fs/proc/base.c b/fs/proc/base.c
47446index 9fc77b4..04761b8 100644
47447--- a/fs/proc/base.c
47448+++ b/fs/proc/base.c
47449@@ -109,6 +109,14 @@ struct pid_entry {
47450 union proc_op op;
47451 };
47452
47453+struct getdents_callback {
47454+ struct linux_dirent __user * current_dir;
47455+ struct linux_dirent __user * previous;
47456+ struct file * file;
47457+ int count;
47458+ int error;
47459+};
47460+
47461 #define NOD(NAME, MODE, IOP, FOP, OP) { \
47462 .name = (NAME), \
47463 .len = sizeof(NAME) - 1, \
47464@@ -213,6 +221,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
47465 if (!mm->arg_end)
47466 goto out_mm; /* Shh! No looking before we're done */
47467
47468+ if (gr_acl_handle_procpidmem(task))
47469+ goto out_mm;
47470+
47471 len = mm->arg_end - mm->arg_start;
47472
47473 if (len > PAGE_SIZE)
47474@@ -240,12 +251,28 @@ out:
47475 return res;
47476 }
47477
47478+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47479+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47480+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
47481+ _mm->pax_flags & MF_PAX_SEGMEXEC))
47482+#endif
47483+
47484 static int proc_pid_auxv(struct task_struct *task, char *buffer)
47485 {
47486 struct mm_struct *mm = mm_for_maps(task);
47487 int res = PTR_ERR(mm);
47488 if (mm && !IS_ERR(mm)) {
47489 unsigned int nwords = 0;
47490+
47491+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47492+ /* allow if we're currently ptracing this task */
47493+ if (PAX_RAND_FLAGS(mm) &&
47494+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
47495+ mmput(mm);
47496+ return 0;
47497+ }
47498+#endif
47499+
47500 do {
47501 nwords += 2;
47502 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
47503@@ -259,7 +286,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
47504 }
47505
47506
47507-#ifdef CONFIG_KALLSYMS
47508+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47509 /*
47510 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
47511 * Returns the resolved symbol. If that fails, simply return the address.
47512@@ -298,7 +325,7 @@ static void unlock_trace(struct task_struct *task)
47513 mutex_unlock(&task->signal->cred_guard_mutex);
47514 }
47515
47516-#ifdef CONFIG_STACKTRACE
47517+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47518
47519 #define MAX_STACK_TRACE_DEPTH 64
47520
47521@@ -489,7 +516,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
47522 return count;
47523 }
47524
47525-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47526+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47527 static int proc_pid_syscall(struct task_struct *task, char *buffer)
47528 {
47529 long nr;
47530@@ -518,7 +545,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
47531 /************************************************************************/
47532
47533 /* permission checks */
47534-static int proc_fd_access_allowed(struct inode *inode)
47535+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
47536 {
47537 struct task_struct *task;
47538 int allowed = 0;
47539@@ -528,7 +555,10 @@ static int proc_fd_access_allowed(struct inode *inode)
47540 */
47541 task = get_proc_task(inode);
47542 if (task) {
47543- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47544+ if (log)
47545+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47546+ else
47547+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
47548 put_task_struct(task);
47549 }
47550 return allowed;
47551@@ -566,10 +596,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
47552 struct task_struct *task,
47553 int hide_pid_min)
47554 {
47555+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47556+ return false;
47557+
47558+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47559+ rcu_read_lock();
47560+ {
47561+ const struct cred *tmpcred = current_cred();
47562+ const struct cred *cred = __task_cred(task);
47563+
47564+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
47565+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47566+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
47567+#endif
47568+ ) {
47569+ rcu_read_unlock();
47570+ return true;
47571+ }
47572+ }
47573+ rcu_read_unlock();
47574+
47575+ if (!pid->hide_pid)
47576+ return false;
47577+#endif
47578+
47579 if (pid->hide_pid < hide_pid_min)
47580 return true;
47581 if (in_group_p(pid->pid_gid))
47582 return true;
47583+
47584 return ptrace_may_access(task, PTRACE_MODE_READ);
47585 }
47586
47587@@ -587,7 +642,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
47588 put_task_struct(task);
47589
47590 if (!has_perms) {
47591+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47592+ {
47593+#else
47594 if (pid->hide_pid == 2) {
47595+#endif
47596 /*
47597 * Let's make getdents(), stat(), and open()
47598 * consistent with each other. If a process
47599@@ -702,6 +761,10 @@ static int mem_open(struct inode* inode, struct file* file)
47600 file->f_mode |= FMODE_UNSIGNED_OFFSET;
47601 file->private_data = mm;
47602
47603+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47604+ file->f_version = current->exec_id;
47605+#endif
47606+
47607 return 0;
47608 }
47609
47610@@ -713,6 +776,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
47611 ssize_t copied;
47612 char *page;
47613
47614+#ifdef CONFIG_GRKERNSEC
47615+ if (write)
47616+ return -EPERM;
47617+#endif
47618+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47619+ if (file->f_version != current->exec_id) {
47620+ gr_log_badprocpid("mem");
47621+ return 0;
47622+ }
47623+#endif
47624+
47625 if (!mm)
47626 return 0;
47627
47628@@ -813,6 +887,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
47629 if (!task)
47630 goto out_no_task;
47631
47632+ if (gr_acl_handle_procpidmem(task))
47633+ goto out;
47634+
47635 ret = -ENOMEM;
47636 page = (char *)__get_free_page(GFP_TEMPORARY);
47637 if (!page)
47638@@ -1433,7 +1510,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
47639 path_put(&nd->path);
47640
47641 /* Are we allowed to snoop on the tasks file descriptors? */
47642- if (!proc_fd_access_allowed(inode))
47643+ if (!proc_fd_access_allowed(inode, 0))
47644 goto out;
47645
47646 error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
47647@@ -1472,8 +1549,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
47648 struct path path;
47649
47650 /* Are we allowed to snoop on the tasks file descriptors? */
47651- if (!proc_fd_access_allowed(inode))
47652- goto out;
47653+ /* logging this is needed for learning on chromium to work properly,
47654+ but we don't want to flood the logs from 'ps' which does a readlink
47655+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
47656+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
47657+ */
47658+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
47659+ if (!proc_fd_access_allowed(inode,0))
47660+ goto out;
47661+ } else {
47662+ if (!proc_fd_access_allowed(inode,1))
47663+ goto out;
47664+ }
47665
47666 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
47667 if (error)
47668@@ -1538,7 +1625,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
47669 rcu_read_lock();
47670 cred = __task_cred(task);
47671 inode->i_uid = cred->euid;
47672+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47673+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47674+#else
47675 inode->i_gid = cred->egid;
47676+#endif
47677 rcu_read_unlock();
47678 }
47679 security_task_to_inode(task, inode);
47680@@ -1574,10 +1665,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
47681 return -ENOENT;
47682 }
47683 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47684+#ifdef CONFIG_GRKERNSEC_PROC_USER
47685+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47686+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47687+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47688+#endif
47689 task_dumpable(task)) {
47690 cred = __task_cred(task);
47691 stat->uid = cred->euid;
47692+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47693+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
47694+#else
47695 stat->gid = cred->egid;
47696+#endif
47697 }
47698 }
47699 rcu_read_unlock();
47700@@ -1615,11 +1715,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
47701
47702 if (task) {
47703 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47704+#ifdef CONFIG_GRKERNSEC_PROC_USER
47705+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47706+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47707+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47708+#endif
47709 task_dumpable(task)) {
47710 rcu_read_lock();
47711 cred = __task_cred(task);
47712 inode->i_uid = cred->euid;
47713+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47714+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47715+#else
47716 inode->i_gid = cred->egid;
47717+#endif
47718 rcu_read_unlock();
47719 } else {
47720 inode->i_uid = 0;
47721@@ -1737,7 +1846,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
47722 int fd = proc_fd(inode);
47723
47724 if (task) {
47725- files = get_files_struct(task);
47726+ if (!gr_acl_handle_procpidmem(task))
47727+ files = get_files_struct(task);
47728 put_task_struct(task);
47729 }
47730 if (files) {
47731@@ -2338,11 +2448,21 @@ static const struct file_operations proc_map_files_operations = {
47732 */
47733 static int proc_fd_permission(struct inode *inode, int mask)
47734 {
47735+ struct task_struct *task;
47736 int rv = generic_permission(inode, mask);
47737- if (rv == 0)
47738- return 0;
47739+
47740 if (task_pid(current) == proc_pid(inode))
47741 rv = 0;
47742+
47743+ task = get_proc_task(inode);
47744+ if (task == NULL)
47745+ return rv;
47746+
47747+ if (gr_acl_handle_procpidmem(task))
47748+ rv = -EACCES;
47749+
47750+ put_task_struct(task);
47751+
47752 return rv;
47753 }
47754
47755@@ -2452,6 +2572,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
47756 if (!task)
47757 goto out_no_task;
47758
47759+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47760+ goto out;
47761+
47762 /*
47763 * Yes, it does not scale. And it should not. Don't add
47764 * new entries into /proc/<tgid>/ without very good reasons.
47765@@ -2496,6 +2619,9 @@ static int proc_pident_readdir(struct file *filp,
47766 if (!task)
47767 goto out_no_task;
47768
47769+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47770+ goto out;
47771+
47772 ret = 0;
47773 i = filp->f_pos;
47774 switch (i) {
47775@@ -2766,7 +2892,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
47776 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
47777 void *cookie)
47778 {
47779- char *s = nd_get_link(nd);
47780+ const char *s = nd_get_link(nd);
47781 if (!IS_ERR(s))
47782 __putname(s);
47783 }
47784@@ -2967,7 +3093,7 @@ static const struct pid_entry tgid_base_stuff[] = {
47785 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
47786 #endif
47787 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
47788-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47789+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47790 INF("syscall", S_IRUGO, proc_pid_syscall),
47791 #endif
47792 INF("cmdline", S_IRUGO, proc_pid_cmdline),
47793@@ -2992,10 +3118,10 @@ static const struct pid_entry tgid_base_stuff[] = {
47794 #ifdef CONFIG_SECURITY
47795 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
47796 #endif
47797-#ifdef CONFIG_KALLSYMS
47798+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47799 INF("wchan", S_IRUGO, proc_pid_wchan),
47800 #endif
47801-#ifdef CONFIG_STACKTRACE
47802+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47803 ONE("stack", S_IRUGO, proc_pid_stack),
47804 #endif
47805 #ifdef CONFIG_SCHEDSTATS
47806@@ -3029,6 +3155,9 @@ static const struct pid_entry tgid_base_stuff[] = {
47807 #ifdef CONFIG_HARDWALL
47808 INF("hardwall", S_IRUGO, proc_pid_hardwall),
47809 #endif
47810+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47811+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
47812+#endif
47813 };
47814
47815 static int proc_tgid_base_readdir(struct file * filp,
47816@@ -3155,7 +3284,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
47817 if (!inode)
47818 goto out;
47819
47820+#ifdef CONFIG_GRKERNSEC_PROC_USER
47821+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
47822+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47823+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47824+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
47825+#else
47826 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
47827+#endif
47828 inode->i_op = &proc_tgid_base_inode_operations;
47829 inode->i_fop = &proc_tgid_base_operations;
47830 inode->i_flags|=S_IMMUTABLE;
47831@@ -3197,7 +3333,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
47832 if (!task)
47833 goto out;
47834
47835+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47836+ goto out_put_task;
47837+
47838 result = proc_pid_instantiate(dir, dentry, task, NULL);
47839+out_put_task:
47840 put_task_struct(task);
47841 out:
47842 return result;
47843@@ -3260,6 +3400,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
47844 static int fake_filldir(void *buf, const char *name, int namelen,
47845 loff_t offset, u64 ino, unsigned d_type)
47846 {
47847+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
47848+ __buf->error = -EINVAL;
47849 return 0;
47850 }
47851
47852@@ -3326,7 +3468,7 @@ static const struct pid_entry tid_base_stuff[] = {
47853 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
47854 #endif
47855 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
47856-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47857+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47858 INF("syscall", S_IRUGO, proc_pid_syscall),
47859 #endif
47860 INF("cmdline", S_IRUGO, proc_pid_cmdline),
47861@@ -3350,10 +3492,10 @@ static const struct pid_entry tid_base_stuff[] = {
47862 #ifdef CONFIG_SECURITY
47863 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
47864 #endif
47865-#ifdef CONFIG_KALLSYMS
47866+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47867 INF("wchan", S_IRUGO, proc_pid_wchan),
47868 #endif
47869-#ifdef CONFIG_STACKTRACE
47870+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47871 ONE("stack", S_IRUGO, proc_pid_stack),
47872 #endif
47873 #ifdef CONFIG_SCHEDSTATS
47874diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
47875index 82676e3..5f8518a 100644
47876--- a/fs/proc/cmdline.c
47877+++ b/fs/proc/cmdline.c
47878@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
47879
47880 static int __init proc_cmdline_init(void)
47881 {
47882+#ifdef CONFIG_GRKERNSEC_PROC_ADD
47883+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
47884+#else
47885 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
47886+#endif
47887 return 0;
47888 }
47889 module_init(proc_cmdline_init);
47890diff --git a/fs/proc/devices.c b/fs/proc/devices.c
47891index b143471..bb105e5 100644
47892--- a/fs/proc/devices.c
47893+++ b/fs/proc/devices.c
47894@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
47895
47896 static int __init proc_devices_init(void)
47897 {
47898+#ifdef CONFIG_GRKERNSEC_PROC_ADD
47899+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
47900+#else
47901 proc_create("devices", 0, NULL, &proc_devinfo_operations);
47902+#endif
47903 return 0;
47904 }
47905 module_init(proc_devices_init);
47906diff --git a/fs/proc/inode.c b/fs/proc/inode.c
47907index 205c922..2ee4c57 100644
47908--- a/fs/proc/inode.c
47909+++ b/fs/proc/inode.c
47910@@ -21,11 +21,17 @@
47911 #include <linux/seq_file.h>
47912 #include <linux/slab.h>
47913 #include <linux/mount.h>
47914+#include <linux/grsecurity.h>
47915
47916 #include <asm/uaccess.h>
47917
47918 #include "internal.h"
47919
47920+#ifdef CONFIG_PROC_SYSCTL
47921+extern const struct inode_operations proc_sys_inode_operations;
47922+extern const struct inode_operations proc_sys_dir_operations;
47923+#endif
47924+
47925 static void proc_evict_inode(struct inode *inode)
47926 {
47927 struct proc_dir_entry *de;
47928@@ -51,6 +57,13 @@ static void proc_evict_inode(struct inode *inode)
47929 ns_ops = PROC_I(inode)->ns_ops;
47930 if (ns_ops && ns_ops->put)
47931 ns_ops->put(PROC_I(inode)->ns);
47932+
47933+#ifdef CONFIG_PROC_SYSCTL
47934+ if (inode->i_op == &proc_sys_inode_operations ||
47935+ inode->i_op == &proc_sys_dir_operations)
47936+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
47937+#endif
47938+
47939 }
47940
47941 static struct kmem_cache * proc_inode_cachep;
47942@@ -456,7 +469,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
47943 if (de->mode) {
47944 inode->i_mode = de->mode;
47945 inode->i_uid = de->uid;
47946+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47947+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47948+#else
47949 inode->i_gid = de->gid;
47950+#endif
47951 }
47952 if (de->size)
47953 inode->i_size = de->size;
47954diff --git a/fs/proc/internal.h b/fs/proc/internal.h
47955index 5f79bb8..eeccee4 100644
47956--- a/fs/proc/internal.h
47957+++ b/fs/proc/internal.h
47958@@ -54,6 +54,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47959 struct pid *pid, struct task_struct *task);
47960 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47961 struct pid *pid, struct task_struct *task);
47962+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47963+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
47964+#endif
47965 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
47966
47967 extern const struct file_operations proc_pid_maps_operations;
47968diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
47969index 86c67ee..cdca321 100644
47970--- a/fs/proc/kcore.c
47971+++ b/fs/proc/kcore.c
47972@@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47973 * the addresses in the elf_phdr on our list.
47974 */
47975 start = kc_offset_to_vaddr(*fpos - elf_buflen);
47976- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
47977+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
47978+ if (tsz > buflen)
47979 tsz = buflen;
47980-
47981+
47982 while (buflen) {
47983 struct kcore_list *m;
47984
47985@@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47986 kfree(elf_buf);
47987 } else {
47988 if (kern_addr_valid(start)) {
47989- unsigned long n;
47990+ char *elf_buf;
47991+ mm_segment_t oldfs;
47992
47993- n = copy_to_user(buffer, (char *)start, tsz);
47994- /*
47995- * We cannot distinguish between fault on source
47996- * and fault on destination. When this happens
47997- * we clear too and hope it will trigger the
47998- * EFAULT again.
47999- */
48000- if (n) {
48001- if (clear_user(buffer + tsz - n,
48002- n))
48003+ elf_buf = kmalloc(tsz, GFP_KERNEL);
48004+ if (!elf_buf)
48005+ return -ENOMEM;
48006+ oldfs = get_fs();
48007+ set_fs(KERNEL_DS);
48008+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
48009+ set_fs(oldfs);
48010+ if (copy_to_user(buffer, elf_buf, tsz)) {
48011+ kfree(elf_buf);
48012 return -EFAULT;
48013+ }
48014 }
48015+ set_fs(oldfs);
48016+ kfree(elf_buf);
48017 } else {
48018 if (clear_user(buffer, tsz))
48019 return -EFAULT;
48020@@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48021
48022 static int open_kcore(struct inode *inode, struct file *filp)
48023 {
48024+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
48025+ return -EPERM;
48026+#endif
48027 if (!capable(CAP_SYS_RAWIO))
48028 return -EPERM;
48029 if (kcore_need_update)
48030diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
48031index 80e4645..53e5fcf 100644
48032--- a/fs/proc/meminfo.c
48033+++ b/fs/proc/meminfo.c
48034@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
48035 vmi.used >> 10,
48036 vmi.largest_chunk >> 10
48037 #ifdef CONFIG_MEMORY_FAILURE
48038- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
48039+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
48040 #endif
48041 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
48042 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
48043diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
48044index b1822dd..df622cb 100644
48045--- a/fs/proc/nommu.c
48046+++ b/fs/proc/nommu.c
48047@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
48048 if (len < 1)
48049 len = 1;
48050 seq_printf(m, "%*c", len, ' ');
48051- seq_path(m, &file->f_path, "");
48052+ seq_path(m, &file->f_path, "\n\\");
48053 }
48054
48055 seq_putc(m, '\n');
48056diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
48057index 06e1cc1..177cd98 100644
48058--- a/fs/proc/proc_net.c
48059+++ b/fs/proc/proc_net.c
48060@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
48061 struct task_struct *task;
48062 struct nsproxy *ns;
48063 struct net *net = NULL;
48064+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48065+ const struct cred *cred = current_cred();
48066+#endif
48067+
48068+#ifdef CONFIG_GRKERNSEC_PROC_USER
48069+ if (cred->fsuid)
48070+ return net;
48071+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48072+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
48073+ return net;
48074+#endif
48075
48076 rcu_read_lock();
48077 task = pid_task(proc_pid(dir), PIDTYPE_PID);
48078diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
48079index 21d836f..bebf3ee 100644
48080--- a/fs/proc/proc_sysctl.c
48081+++ b/fs/proc/proc_sysctl.c
48082@@ -12,11 +12,15 @@
48083 #include <linux/module.h>
48084 #include "internal.h"
48085
48086+extern int gr_handle_chroot_sysctl(const int op);
48087+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
48088+ const int op);
48089+
48090 static const struct dentry_operations proc_sys_dentry_operations;
48091 static const struct file_operations proc_sys_file_operations;
48092-static const struct inode_operations proc_sys_inode_operations;
48093+const struct inode_operations proc_sys_inode_operations;
48094 static const struct file_operations proc_sys_dir_file_operations;
48095-static const struct inode_operations proc_sys_dir_operations;
48096+const struct inode_operations proc_sys_dir_operations;
48097
48098 void proc_sys_poll_notify(struct ctl_table_poll *poll)
48099 {
48100@@ -470,8 +474,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
48101
48102 err = NULL;
48103 d_set_d_op(dentry, &proc_sys_dentry_operations);
48104+
48105+ gr_handle_proc_create(dentry, inode);
48106+
48107 d_add(dentry, inode);
48108
48109+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt))
48110+ err = ERR_PTR(-ENOENT);
48111+
48112 out:
48113 sysctl_head_finish(head);
48114 return err;
48115@@ -483,18 +493,20 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
48116 struct inode *inode = filp->f_path.dentry->d_inode;
48117 struct ctl_table_header *head = grab_header(inode);
48118 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
48119+ int op = write ? MAY_WRITE : MAY_READ;
48120 ssize_t error;
48121 size_t res;
48122
48123 if (IS_ERR(head))
48124 return PTR_ERR(head);
48125
48126+
48127 /*
48128 * At this point we know that the sysctl was not unregistered
48129 * and won't be until we finish.
48130 */
48131 error = -EPERM;
48132- if (sysctl_perm(head->root, table, write ? MAY_WRITE : MAY_READ))
48133+ if (sysctl_perm(head->root, table, op))
48134 goto out;
48135
48136 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
48137@@ -502,6 +514,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
48138 if (!table->proc_handler)
48139 goto out;
48140
48141+#ifdef CONFIG_GRKERNSEC
48142+ error = -EPERM;
48143+ if (gr_handle_chroot_sysctl(op))
48144+ goto out;
48145+ dget(filp->f_path.dentry);
48146+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
48147+ dput(filp->f_path.dentry);
48148+ goto out;
48149+ }
48150+ dput(filp->f_path.dentry);
48151+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
48152+ goto out;
48153+ if (write && !capable(CAP_SYS_ADMIN))
48154+ goto out;
48155+#endif
48156+
48157 /* careful: calling conventions are nasty here */
48158 res = count;
48159 error = table->proc_handler(table, write, buf, &res, ppos);
48160@@ -599,6 +627,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
48161 return -ENOMEM;
48162 } else {
48163 d_set_d_op(child, &proc_sys_dentry_operations);
48164+
48165+ gr_handle_proc_create(child, inode);
48166+
48167 d_add(child, inode);
48168 }
48169 } else {
48170@@ -642,6 +673,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
48171 if ((*pos)++ < file->f_pos)
48172 return 0;
48173
48174+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
48175+ return 0;
48176+
48177 if (unlikely(S_ISLNK(table->mode)))
48178 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
48179 else
48180@@ -759,6 +793,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
48181 if (IS_ERR(head))
48182 return PTR_ERR(head);
48183
48184+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
48185+ return -ENOENT;
48186+
48187 generic_fillattr(inode, stat);
48188 if (table)
48189 stat->mode = (stat->mode & S_IFMT) | table->mode;
48190@@ -781,13 +818,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
48191 .llseek = generic_file_llseek,
48192 };
48193
48194-static const struct inode_operations proc_sys_inode_operations = {
48195+const struct inode_operations proc_sys_inode_operations = {
48196 .permission = proc_sys_permission,
48197 .setattr = proc_sys_setattr,
48198 .getattr = proc_sys_getattr,
48199 };
48200
48201-static const struct inode_operations proc_sys_dir_operations = {
48202+const struct inode_operations proc_sys_dir_operations = {
48203 .lookup = proc_sys_lookup,
48204 .permission = proc_sys_permission,
48205 .setattr = proc_sys_setattr,
48206diff --git a/fs/proc/root.c b/fs/proc/root.c
48207index eed44bf..abeb499 100644
48208--- a/fs/proc/root.c
48209+++ b/fs/proc/root.c
48210@@ -188,7 +188,15 @@ void __init proc_root_init(void)
48211 #ifdef CONFIG_PROC_DEVICETREE
48212 proc_device_tree_init();
48213 #endif
48214+#ifdef CONFIG_GRKERNSEC_PROC_ADD
48215+#ifdef CONFIG_GRKERNSEC_PROC_USER
48216+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
48217+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48218+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
48219+#endif
48220+#else
48221 proc_mkdir("bus", NULL);
48222+#endif
48223 proc_sys_init();
48224 }
48225
48226diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
48227index 7faaf2a..096c28b 100644
48228--- a/fs/proc/task_mmu.c
48229+++ b/fs/proc/task_mmu.c
48230@@ -11,12 +11,19 @@
48231 #include <linux/rmap.h>
48232 #include <linux/swap.h>
48233 #include <linux/swapops.h>
48234+#include <linux/grsecurity.h>
48235
48236 #include <asm/elf.h>
48237 #include <asm/uaccess.h>
48238 #include <asm/tlbflush.h>
48239 #include "internal.h"
48240
48241+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48242+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48243+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
48244+ _mm->pax_flags & MF_PAX_SEGMEXEC))
48245+#endif
48246+
48247 void task_mem(struct seq_file *m, struct mm_struct *mm)
48248 {
48249 unsigned long data, text, lib, swap;
48250@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48251 "VmExe:\t%8lu kB\n"
48252 "VmLib:\t%8lu kB\n"
48253 "VmPTE:\t%8lu kB\n"
48254- "VmSwap:\t%8lu kB\n",
48255- hiwater_vm << (PAGE_SHIFT-10),
48256+ "VmSwap:\t%8lu kB\n"
48257+
48258+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48259+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
48260+#endif
48261+
48262+ ,hiwater_vm << (PAGE_SHIFT-10),
48263 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
48264 mm->locked_vm << (PAGE_SHIFT-10),
48265 mm->pinned_vm << (PAGE_SHIFT-10),
48266@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48267 data << (PAGE_SHIFT-10),
48268 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
48269 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
48270- swap << (PAGE_SHIFT-10));
48271+ swap << (PAGE_SHIFT-10)
48272+
48273+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48274+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48275+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
48276+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
48277+#else
48278+ , mm->context.user_cs_base
48279+ , mm->context.user_cs_limit
48280+#endif
48281+#endif
48282+
48283+ );
48284 }
48285
48286 unsigned long task_vsize(struct mm_struct *mm)
48287@@ -231,13 +255,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48288 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
48289 }
48290
48291- /* We don't show the stack guard page in /proc/maps */
48292+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48293+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
48294+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
48295+#else
48296 start = vma->vm_start;
48297- if (stack_guard_page_start(vma, start))
48298- start += PAGE_SIZE;
48299 end = vma->vm_end;
48300- if (stack_guard_page_end(vma, end))
48301- end -= PAGE_SIZE;
48302+#endif
48303
48304 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
48305 start,
48306@@ -246,7 +270,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48307 flags & VM_WRITE ? 'w' : '-',
48308 flags & VM_EXEC ? 'x' : '-',
48309 flags & VM_MAYSHARE ? 's' : 'p',
48310+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48311+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
48312+#else
48313 pgoff,
48314+#endif
48315 MAJOR(dev), MINOR(dev), ino, &len);
48316
48317 /*
48318@@ -255,7 +283,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48319 */
48320 if (file) {
48321 pad_len_spaces(m, len);
48322- seq_path(m, &file->f_path, "\n");
48323+ seq_path(m, &file->f_path, "\n\\");
48324 goto done;
48325 }
48326
48327@@ -281,8 +309,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48328 * Thread stack in /proc/PID/task/TID/maps or
48329 * the main process stack.
48330 */
48331- if (!is_pid || (vma->vm_start <= mm->start_stack &&
48332- vma->vm_end >= mm->start_stack)) {
48333+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
48334+ (vma->vm_start <= mm->start_stack &&
48335+ vma->vm_end >= mm->start_stack)) {
48336 name = "[stack]";
48337 } else {
48338 /* Thread stack in /proc/PID/maps */
48339@@ -306,6 +335,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
48340 struct proc_maps_private *priv = m->private;
48341 struct task_struct *task = priv->task;
48342
48343+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48344+ if (current->exec_id != m->exec_id) {
48345+ gr_log_badprocpid("maps");
48346+ return 0;
48347+ }
48348+#endif
48349+
48350 show_map_vma(m, vma, is_pid);
48351
48352 if (m->count < m->size) /* vma is copied successfully */
48353@@ -482,12 +518,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
48354 .private = &mss,
48355 };
48356
48357+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48358+ if (current->exec_id != m->exec_id) {
48359+ gr_log_badprocpid("smaps");
48360+ return 0;
48361+ }
48362+#endif
48363 memset(&mss, 0, sizeof mss);
48364- mss.vma = vma;
48365- /* mmap_sem is held in m_start */
48366- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48367- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48368-
48369+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48370+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
48371+#endif
48372+ mss.vma = vma;
48373+ /* mmap_sem is held in m_start */
48374+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48375+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48376+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48377+ }
48378+#endif
48379 show_map_vma(m, vma, is_pid);
48380
48381 seq_printf(m,
48382@@ -505,7 +552,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
48383 "KernelPageSize: %8lu kB\n"
48384 "MMUPageSize: %8lu kB\n"
48385 "Locked: %8lu kB\n",
48386+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48387+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
48388+#else
48389 (vma->vm_end - vma->vm_start) >> 10,
48390+#endif
48391 mss.resident >> 10,
48392 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
48393 mss.shared_clean >> 10,
48394@@ -1138,6 +1189,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
48395 int n;
48396 char buffer[50];
48397
48398+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48399+ if (current->exec_id != m->exec_id) {
48400+ gr_log_badprocpid("numa_maps");
48401+ return 0;
48402+ }
48403+#endif
48404+
48405 if (!mm)
48406 return 0;
48407
48408@@ -1155,11 +1213,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
48409 mpol_to_str(buffer, sizeof(buffer), pol, 0);
48410 mpol_cond_put(pol);
48411
48412+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48413+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
48414+#else
48415 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
48416+#endif
48417
48418 if (file) {
48419 seq_printf(m, " file=");
48420- seq_path(m, &file->f_path, "\n\t= ");
48421+ seq_path(m, &file->f_path, "\n\t\\= ");
48422 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
48423 seq_printf(m, " heap");
48424 } else {
48425diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
48426index 74fe164..899e77b 100644
48427--- a/fs/proc/task_nommu.c
48428+++ b/fs/proc/task_nommu.c
48429@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48430 else
48431 bytes += kobjsize(mm);
48432
48433- if (current->fs && current->fs->users > 1)
48434+ if (current->fs && atomic_read(&current->fs->users) > 1)
48435 sbytes += kobjsize(current->fs);
48436 else
48437 bytes += kobjsize(current->fs);
48438@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
48439
48440 if (file) {
48441 pad_len_spaces(m, len);
48442- seq_path(m, &file->f_path, "");
48443+ seq_path(m, &file->f_path, "\n\\");
48444 } else if (mm) {
48445 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
48446
48447diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
48448index d67908b..d13f6a6 100644
48449--- a/fs/quota/netlink.c
48450+++ b/fs/quota/netlink.c
48451@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
48452 void quota_send_warning(short type, unsigned int id, dev_t dev,
48453 const char warntype)
48454 {
48455- static atomic_t seq;
48456+ static atomic_unchecked_t seq;
48457 struct sk_buff *skb;
48458 void *msg_head;
48459 int ret;
48460@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
48461 "VFS: Not enough memory to send quota warning.\n");
48462 return;
48463 }
48464- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
48465+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
48466 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
48467 if (!msg_head) {
48468 printk(KERN_ERR
48469diff --git a/fs/readdir.c b/fs/readdir.c
48470index cc0a822..43cb195 100644
48471--- a/fs/readdir.c
48472+++ b/fs/readdir.c
48473@@ -17,6 +17,7 @@
48474 #include <linux/security.h>
48475 #include <linux/syscalls.h>
48476 #include <linux/unistd.h>
48477+#include <linux/namei.h>
48478
48479 #include <asm/uaccess.h>
48480
48481@@ -67,6 +68,7 @@ struct old_linux_dirent {
48482
48483 struct readdir_callback {
48484 struct old_linux_dirent __user * dirent;
48485+ struct file * file;
48486 int result;
48487 };
48488
48489@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
48490 buf->result = -EOVERFLOW;
48491 return -EOVERFLOW;
48492 }
48493+
48494+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48495+ return 0;
48496+
48497 buf->result++;
48498 dirent = buf->dirent;
48499 if (!access_ok(VERIFY_WRITE, dirent,
48500@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
48501
48502 buf.result = 0;
48503 buf.dirent = dirent;
48504+ buf.file = file;
48505
48506 error = vfs_readdir(file, fillonedir, &buf);
48507 if (buf.result)
48508@@ -142,6 +149,7 @@ struct linux_dirent {
48509 struct getdents_callback {
48510 struct linux_dirent __user * current_dir;
48511 struct linux_dirent __user * previous;
48512+ struct file * file;
48513 int count;
48514 int error;
48515 };
48516@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
48517 buf->error = -EOVERFLOW;
48518 return -EOVERFLOW;
48519 }
48520+
48521+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48522+ return 0;
48523+
48524 dirent = buf->previous;
48525 if (dirent) {
48526 if (__put_user(offset, &dirent->d_off))
48527@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
48528 buf.previous = NULL;
48529 buf.count = count;
48530 buf.error = 0;
48531+ buf.file = file;
48532
48533 error = vfs_readdir(file, filldir, &buf);
48534 if (error >= 0)
48535@@ -229,6 +242,7 @@ out:
48536 struct getdents_callback64 {
48537 struct linux_dirent64 __user * current_dir;
48538 struct linux_dirent64 __user * previous;
48539+ struct file *file;
48540 int count;
48541 int error;
48542 };
48543@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
48544 buf->error = -EINVAL; /* only used if we fail.. */
48545 if (reclen > buf->count)
48546 return -EINVAL;
48547+
48548+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48549+ return 0;
48550+
48551 dirent = buf->previous;
48552 if (dirent) {
48553 if (__put_user(offset, &dirent->d_off))
48554@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48555
48556 buf.current_dir = dirent;
48557 buf.previous = NULL;
48558+ buf.file = file;
48559 buf.count = count;
48560 buf.error = 0;
48561
48562@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48563 error = buf.error;
48564 lastdirent = buf.previous;
48565 if (lastdirent) {
48566- typeof(lastdirent->d_off) d_off = file->f_pos;
48567+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48568 if (__put_user(d_off, &lastdirent->d_off))
48569 error = -EFAULT;
48570 else
48571diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
48572index 2b7882b..1c5ef48 100644
48573--- a/fs/reiserfs/do_balan.c
48574+++ b/fs/reiserfs/do_balan.c
48575@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
48576 return;
48577 }
48578
48579- atomic_inc(&(fs_generation(tb->tb_sb)));
48580+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
48581 do_balance_starts(tb);
48582
48583 /* balance leaf returns 0 except if combining L R and S into
48584diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
48585index 2c1ade6..8c59d8d 100644
48586--- a/fs/reiserfs/procfs.c
48587+++ b/fs/reiserfs/procfs.c
48588@@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
48589 "SMALL_TAILS " : "NO_TAILS ",
48590 replay_only(sb) ? "REPLAY_ONLY " : "",
48591 convert_reiserfs(sb) ? "CONV " : "",
48592- atomic_read(&r->s_generation_counter),
48593+ atomic_read_unchecked(&r->s_generation_counter),
48594 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
48595 SF(s_do_balance), SF(s_unneeded_left_neighbor),
48596 SF(s_good_search_by_key_reada), SF(s_bmaps),
48597diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
48598index a59d271..e12d1cf 100644
48599--- a/fs/reiserfs/reiserfs.h
48600+++ b/fs/reiserfs/reiserfs.h
48601@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
48602 /* Comment? -Hans */
48603 wait_queue_head_t s_wait;
48604 /* To be obsoleted soon by per buffer seals.. -Hans */
48605- atomic_t s_generation_counter; // increased by one every time the
48606+ atomic_unchecked_t s_generation_counter; // increased by one every time the
48607 // tree gets re-balanced
48608 unsigned long s_properties; /* File system properties. Currently holds
48609 on-disk FS format */
48610@@ -1973,7 +1973,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
48611 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
48612
48613 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
48614-#define get_generation(s) atomic_read (&fs_generation(s))
48615+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
48616 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
48617 #define __fs_changed(gen,s) (gen != get_generation (s))
48618 #define fs_changed(gen,s) \
48619diff --git a/fs/select.c b/fs/select.c
48620index 17d33d0..da0bf5c 100644
48621--- a/fs/select.c
48622+++ b/fs/select.c
48623@@ -20,6 +20,7 @@
48624 #include <linux/export.h>
48625 #include <linux/slab.h>
48626 #include <linux/poll.h>
48627+#include <linux/security.h>
48628 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
48629 #include <linux/file.h>
48630 #include <linux/fdtable.h>
48631@@ -833,6 +834,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
48632 struct poll_list *walk = head;
48633 unsigned long todo = nfds;
48634
48635+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
48636 if (nfds > rlimit(RLIMIT_NOFILE))
48637 return -EINVAL;
48638
48639diff --git a/fs/seq_file.c b/fs/seq_file.c
48640index 0cbd049..cab1127 100644
48641--- a/fs/seq_file.c
48642+++ b/fs/seq_file.c
48643@@ -9,6 +9,7 @@
48644 #include <linux/export.h>
48645 #include <linux/seq_file.h>
48646 #include <linux/slab.h>
48647+#include <linux/sched.h>
48648
48649 #include <asm/uaccess.h>
48650 #include <asm/page.h>
48651@@ -56,6 +57,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
48652 memset(p, 0, sizeof(*p));
48653 mutex_init(&p->lock);
48654 p->op = op;
48655+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48656+ p->exec_id = current->exec_id;
48657+#endif
48658
48659 /*
48660 * Wrappers around seq_open(e.g. swaps_open) need to be
48661@@ -567,7 +571,7 @@ static void single_stop(struct seq_file *p, void *v)
48662 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
48663 void *data)
48664 {
48665- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
48666+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
48667 int res = -ENOMEM;
48668
48669 if (op) {
48670diff --git a/fs/splice.c b/fs/splice.c
48671index f847684..156619e 100644
48672--- a/fs/splice.c
48673+++ b/fs/splice.c
48674@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
48675 pipe_lock(pipe);
48676
48677 for (;;) {
48678- if (!pipe->readers) {
48679+ if (!atomic_read(&pipe->readers)) {
48680 send_sig(SIGPIPE, current, 0);
48681 if (!ret)
48682 ret = -EPIPE;
48683@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
48684 do_wakeup = 0;
48685 }
48686
48687- pipe->waiting_writers++;
48688+ atomic_inc(&pipe->waiting_writers);
48689 pipe_wait(pipe);
48690- pipe->waiting_writers--;
48691+ atomic_dec(&pipe->waiting_writers);
48692 }
48693
48694 pipe_unlock(pipe);
48695@@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
48696 old_fs = get_fs();
48697 set_fs(get_ds());
48698 /* The cast to a user pointer is valid due to the set_fs() */
48699- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
48700+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
48701 set_fs(old_fs);
48702
48703 return res;
48704@@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
48705 old_fs = get_fs();
48706 set_fs(get_ds());
48707 /* The cast to a user pointer is valid due to the set_fs() */
48708- res = vfs_write(file, (const char __user *)buf, count, &pos);
48709+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
48710 set_fs(old_fs);
48711
48712 return res;
48713@@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
48714 goto err;
48715
48716 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
48717- vec[i].iov_base = (void __user *) page_address(page);
48718+ vec[i].iov_base = (void __force_user *) page_address(page);
48719 vec[i].iov_len = this_len;
48720 spd.pages[i] = page;
48721 spd.nr_pages++;
48722@@ -845,10 +845,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
48723 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
48724 {
48725 while (!pipe->nrbufs) {
48726- if (!pipe->writers)
48727+ if (!atomic_read(&pipe->writers))
48728 return 0;
48729
48730- if (!pipe->waiting_writers && sd->num_spliced)
48731+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
48732 return 0;
48733
48734 if (sd->flags & SPLICE_F_NONBLOCK)
48735@@ -1181,7 +1181,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
48736 * out of the pipe right after the splice_to_pipe(). So set
48737 * PIPE_READERS appropriately.
48738 */
48739- pipe->readers = 1;
48740+ atomic_set(&pipe->readers, 1);
48741
48742 current->splice_pipe = pipe;
48743 }
48744@@ -1733,9 +1733,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48745 ret = -ERESTARTSYS;
48746 break;
48747 }
48748- if (!pipe->writers)
48749+ if (!atomic_read(&pipe->writers))
48750 break;
48751- if (!pipe->waiting_writers) {
48752+ if (!atomic_read(&pipe->waiting_writers)) {
48753 if (flags & SPLICE_F_NONBLOCK) {
48754 ret = -EAGAIN;
48755 break;
48756@@ -1767,7 +1767,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48757 pipe_lock(pipe);
48758
48759 while (pipe->nrbufs >= pipe->buffers) {
48760- if (!pipe->readers) {
48761+ if (!atomic_read(&pipe->readers)) {
48762 send_sig(SIGPIPE, current, 0);
48763 ret = -EPIPE;
48764 break;
48765@@ -1780,9 +1780,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48766 ret = -ERESTARTSYS;
48767 break;
48768 }
48769- pipe->waiting_writers++;
48770+ atomic_inc(&pipe->waiting_writers);
48771 pipe_wait(pipe);
48772- pipe->waiting_writers--;
48773+ atomic_dec(&pipe->waiting_writers);
48774 }
48775
48776 pipe_unlock(pipe);
48777@@ -1818,14 +1818,14 @@ retry:
48778 pipe_double_lock(ipipe, opipe);
48779
48780 do {
48781- if (!opipe->readers) {
48782+ if (!atomic_read(&opipe->readers)) {
48783 send_sig(SIGPIPE, current, 0);
48784 if (!ret)
48785 ret = -EPIPE;
48786 break;
48787 }
48788
48789- if (!ipipe->nrbufs && !ipipe->writers)
48790+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
48791 break;
48792
48793 /*
48794@@ -1922,7 +1922,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
48795 pipe_double_lock(ipipe, opipe);
48796
48797 do {
48798- if (!opipe->readers) {
48799+ if (!atomic_read(&opipe->readers)) {
48800 send_sig(SIGPIPE, current, 0);
48801 if (!ret)
48802 ret = -EPIPE;
48803@@ -1967,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
48804 * return EAGAIN if we have the potential of some data in the
48805 * future, otherwise just return 0
48806 */
48807- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
48808+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
48809 ret = -EAGAIN;
48810
48811 pipe_unlock(ipipe);
48812diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
48813index 35a36d3..23424b2 100644
48814--- a/fs/sysfs/dir.c
48815+++ b/fs/sysfs/dir.c
48816@@ -657,6 +657,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
48817 struct sysfs_dirent *sd;
48818 int rc;
48819
48820+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
48821+ const char *parent_name = parent_sd->s_name;
48822+
48823+ mode = S_IFDIR | S_IRWXU;
48824+
48825+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
48826+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
48827+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
48828+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
48829+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
48830+#endif
48831+
48832 /* allocate */
48833 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
48834 if (!sd)
48835diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
48836index 00012e3..8392349 100644
48837--- a/fs/sysfs/file.c
48838+++ b/fs/sysfs/file.c
48839@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
48840
48841 struct sysfs_open_dirent {
48842 atomic_t refcnt;
48843- atomic_t event;
48844+ atomic_unchecked_t event;
48845 wait_queue_head_t poll;
48846 struct list_head buffers; /* goes through sysfs_buffer.list */
48847 };
48848@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
48849 if (!sysfs_get_active(attr_sd))
48850 return -ENODEV;
48851
48852- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
48853+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
48854 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
48855
48856 sysfs_put_active(attr_sd);
48857@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
48858 return -ENOMEM;
48859
48860 atomic_set(&new_od->refcnt, 0);
48861- atomic_set(&new_od->event, 1);
48862+ atomic_set_unchecked(&new_od->event, 1);
48863 init_waitqueue_head(&new_od->poll);
48864 INIT_LIST_HEAD(&new_od->buffers);
48865 goto retry;
48866@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
48867
48868 sysfs_put_active(attr_sd);
48869
48870- if (buffer->event != atomic_read(&od->event))
48871+ if (buffer->event != atomic_read_unchecked(&od->event))
48872 goto trigger;
48873
48874 return DEFAULT_POLLMASK;
48875@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
48876
48877 od = sd->s_attr.open;
48878 if (od) {
48879- atomic_inc(&od->event);
48880+ atomic_inc_unchecked(&od->event);
48881 wake_up_interruptible(&od->poll);
48882 }
48883
48884diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
48885index a7ac78f..02158e1 100644
48886--- a/fs/sysfs/symlink.c
48887+++ b/fs/sysfs/symlink.c
48888@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48889
48890 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
48891 {
48892- char *page = nd_get_link(nd);
48893+ const char *page = nd_get_link(nd);
48894 if (!IS_ERR(page))
48895 free_page((unsigned long)page);
48896 }
48897diff --git a/fs/udf/misc.c b/fs/udf/misc.c
48898index c175b4d..8f36a16 100644
48899--- a/fs/udf/misc.c
48900+++ b/fs/udf/misc.c
48901@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
48902
48903 u8 udf_tag_checksum(const struct tag *t)
48904 {
48905- u8 *data = (u8 *)t;
48906+ const u8 *data = (const u8 *)t;
48907 u8 checksum = 0;
48908 int i;
48909 for (i = 0; i < sizeof(struct tag); ++i)
48910diff --git a/fs/utimes.c b/fs/utimes.c
48911index ba653f3..06ea4b1 100644
48912--- a/fs/utimes.c
48913+++ b/fs/utimes.c
48914@@ -1,6 +1,7 @@
48915 #include <linux/compiler.h>
48916 #include <linux/file.h>
48917 #include <linux/fs.h>
48918+#include <linux/security.h>
48919 #include <linux/linkage.h>
48920 #include <linux/mount.h>
48921 #include <linux/namei.h>
48922@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
48923 goto mnt_drop_write_and_out;
48924 }
48925 }
48926+
48927+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
48928+ error = -EACCES;
48929+ goto mnt_drop_write_and_out;
48930+ }
48931+
48932 mutex_lock(&inode->i_mutex);
48933 error = notify_change(path->dentry, &newattrs);
48934 mutex_unlock(&inode->i_mutex);
48935diff --git a/fs/xattr.c b/fs/xattr.c
48936index 3c8c1cc..a83c398 100644
48937--- a/fs/xattr.c
48938+++ b/fs/xattr.c
48939@@ -316,7 +316,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
48940 * Extended attribute SET operations
48941 */
48942 static long
48943-setxattr(struct dentry *d, const char __user *name, const void __user *value,
48944+setxattr(struct path *path, const char __user *name, const void __user *value,
48945 size_t size, int flags)
48946 {
48947 int error;
48948@@ -349,7 +349,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
48949 }
48950 }
48951
48952- error = vfs_setxattr(d, kname, kvalue, size, flags);
48953+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
48954+ error = -EACCES;
48955+ goto out;
48956+ }
48957+
48958+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
48959 out:
48960 if (vvalue)
48961 vfree(vvalue);
48962@@ -370,7 +375,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
48963 return error;
48964 error = mnt_want_write(path.mnt);
48965 if (!error) {
48966- error = setxattr(path.dentry, name, value, size, flags);
48967+ error = setxattr(&path, name, value, size, flags);
48968 mnt_drop_write(path.mnt);
48969 }
48970 path_put(&path);
48971@@ -389,7 +394,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
48972 return error;
48973 error = mnt_want_write(path.mnt);
48974 if (!error) {
48975- error = setxattr(path.dentry, name, value, size, flags);
48976+ error = setxattr(&path, name, value, size, flags);
48977 mnt_drop_write(path.mnt);
48978 }
48979 path_put(&path);
48980@@ -400,17 +405,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
48981 const void __user *,value, size_t, size, int, flags)
48982 {
48983 struct file *f;
48984- struct dentry *dentry;
48985 int error = -EBADF;
48986
48987 f = fget(fd);
48988 if (!f)
48989 return error;
48990- dentry = f->f_path.dentry;
48991- audit_inode(NULL, dentry);
48992+ audit_inode(NULL, f->f_path.dentry);
48993 error = mnt_want_write_file(f);
48994 if (!error) {
48995- error = setxattr(dentry, name, value, size, flags);
48996+ error = setxattr(&f->f_path, name, value, size, flags);
48997 mnt_drop_write_file(f);
48998 }
48999 fput(f);
49000diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
49001index 69d06b0..c0996e5 100644
49002--- a/fs/xattr_acl.c
49003+++ b/fs/xattr_acl.c
49004@@ -17,8 +17,8 @@
49005 struct posix_acl *
49006 posix_acl_from_xattr(const void *value, size_t size)
49007 {
49008- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
49009- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
49010+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
49011+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
49012 int count;
49013 struct posix_acl *acl;
49014 struct posix_acl_entry *acl_e;
49015diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
49016index 85e7e32..5344e52 100644
49017--- a/fs/xfs/xfs_bmap.c
49018+++ b/fs/xfs/xfs_bmap.c
49019@@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
49020 int nmap,
49021 int ret_nmap);
49022 #else
49023-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
49024+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
49025 #endif /* DEBUG */
49026
49027 STATIC int
49028diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
49029index 79d05e8..e3e5861 100644
49030--- a/fs/xfs/xfs_dir2_sf.c
49031+++ b/fs/xfs/xfs_dir2_sf.c
49032@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
49033 }
49034
49035 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
49036- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49037+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
49038+ char name[sfep->namelen];
49039+ memcpy(name, sfep->name, sfep->namelen);
49040+ if (filldir(dirent, name, sfep->namelen,
49041+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
49042+ *offset = off & 0x7fffffff;
49043+ return 0;
49044+ }
49045+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49046 off & 0x7fffffff, ino, DT_UNKNOWN)) {
49047 *offset = off & 0x7fffffff;
49048 return 0;
49049diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
49050index 91f8ff5..0ce68f9 100644
49051--- a/fs/xfs/xfs_ioctl.c
49052+++ b/fs/xfs/xfs_ioctl.c
49053@@ -128,7 +128,7 @@ xfs_find_handle(
49054 }
49055
49056 error = -EFAULT;
49057- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
49058+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
49059 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
49060 goto out_put;
49061
49062diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
49063index 3011b87..1ab03e9 100644
49064--- a/fs/xfs/xfs_iops.c
49065+++ b/fs/xfs/xfs_iops.c
49066@@ -397,7 +397,7 @@ xfs_vn_put_link(
49067 struct nameidata *nd,
49068 void *p)
49069 {
49070- char *s = nd_get_link(nd);
49071+ const char *s = nd_get_link(nd);
49072
49073 if (!IS_ERR(s))
49074 kfree(s);
49075diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
49076new file mode 100644
49077index 0000000..2645296
49078--- /dev/null
49079+++ b/grsecurity/Kconfig
49080@@ -0,0 +1,1079 @@
49081+#
49082+# grecurity configuration
49083+#
49084+
49085+menu "Grsecurity"
49086+
49087+config GRKERNSEC
49088+ bool "Grsecurity"
49089+ select CRYPTO
49090+ select CRYPTO_SHA256
49091+ help
49092+ If you say Y here, you will be able to configure many features
49093+ that will enhance the security of your system. It is highly
49094+ recommended that you say Y here and read through the help
49095+ for each option so that you fully understand the features and
49096+ can evaluate their usefulness for your machine.
49097+
49098+choice
49099+ prompt "Security Level"
49100+ depends on GRKERNSEC
49101+ default GRKERNSEC_CUSTOM
49102+
49103+config GRKERNSEC_LOW
49104+ bool "Low"
49105+ select GRKERNSEC_LINK
49106+ select GRKERNSEC_FIFO
49107+ select GRKERNSEC_RANDNET
49108+ select GRKERNSEC_DMESG
49109+ select GRKERNSEC_CHROOT
49110+ select GRKERNSEC_CHROOT_CHDIR
49111+
49112+ help
49113+ If you choose this option, several of the grsecurity options will
49114+ be enabled that will give you greater protection against a number
49115+ of attacks, while assuring that none of your software will have any
49116+ conflicts with the additional security measures. If you run a lot
49117+ of unusual software, or you are having problems with the higher
49118+ security levels, you should say Y here. With this option, the
49119+ following features are enabled:
49120+
49121+ - Linking restrictions
49122+ - FIFO restrictions
49123+ - Restricted dmesg
49124+ - Enforced chdir("/") on chroot
49125+ - Runtime module disabling
49126+
49127+config GRKERNSEC_MEDIUM
49128+ bool "Medium"
49129+ select PAX
49130+ select PAX_EI_PAX
49131+ select PAX_PT_PAX_FLAGS
49132+ select PAX_HAVE_ACL_FLAGS
49133+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49134+ select GRKERNSEC_CHROOT
49135+ select GRKERNSEC_CHROOT_SYSCTL
49136+ select GRKERNSEC_LINK
49137+ select GRKERNSEC_FIFO
49138+ select GRKERNSEC_DMESG
49139+ select GRKERNSEC_RANDNET
49140+ select GRKERNSEC_FORKFAIL
49141+ select GRKERNSEC_TIME
49142+ select GRKERNSEC_SIGNAL
49143+ select GRKERNSEC_CHROOT
49144+ select GRKERNSEC_CHROOT_UNIX
49145+ select GRKERNSEC_CHROOT_MOUNT
49146+ select GRKERNSEC_CHROOT_PIVOT
49147+ select GRKERNSEC_CHROOT_DOUBLE
49148+ select GRKERNSEC_CHROOT_CHDIR
49149+ select GRKERNSEC_CHROOT_MKNOD
49150+ select GRKERNSEC_PROC
49151+ select GRKERNSEC_PROC_USERGROUP
49152+ select PAX_RANDUSTACK
49153+ select PAX_ASLR
49154+ select PAX_RANDMMAP
49155+ select PAX_REFCOUNT if (X86 || SPARC64)
49156+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
49157+
49158+ help
49159+ If you say Y here, several features in addition to those included
49160+ in the low additional security level will be enabled. These
49161+ features provide even more security to your system, though in rare
49162+ cases they may be incompatible with very old or poorly written
49163+ software. If you enable this option, make sure that your auth
49164+ service (identd) is running as gid 1001. With this option,
49165+ the following features (in addition to those provided in the
49166+ low additional security level) will be enabled:
49167+
49168+ - Failed fork logging
49169+ - Time change logging
49170+ - Signal logging
49171+ - Deny mounts in chroot
49172+ - Deny double chrooting
49173+ - Deny sysctl writes in chroot
49174+ - Deny mknod in chroot
49175+ - Deny access to abstract AF_UNIX sockets out of chroot
49176+ - Deny pivot_root in chroot
49177+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
49178+ - /proc restrictions with special GID set to 10 (usually wheel)
49179+ - Address Space Layout Randomization (ASLR)
49180+ - Prevent exploitation of most refcount overflows
49181+ - Bounds checking of copying between the kernel and userland
49182+
49183+config GRKERNSEC_HIGH
49184+ bool "High"
49185+ select GRKERNSEC_LINK
49186+ select GRKERNSEC_FIFO
49187+ select GRKERNSEC_DMESG
49188+ select GRKERNSEC_FORKFAIL
49189+ select GRKERNSEC_TIME
49190+ select GRKERNSEC_SIGNAL
49191+ select GRKERNSEC_CHROOT
49192+ select GRKERNSEC_CHROOT_SHMAT
49193+ select GRKERNSEC_CHROOT_UNIX
49194+ select GRKERNSEC_CHROOT_MOUNT
49195+ select GRKERNSEC_CHROOT_FCHDIR
49196+ select GRKERNSEC_CHROOT_PIVOT
49197+ select GRKERNSEC_CHROOT_DOUBLE
49198+ select GRKERNSEC_CHROOT_CHDIR
49199+ select GRKERNSEC_CHROOT_MKNOD
49200+ select GRKERNSEC_CHROOT_CAPS
49201+ select GRKERNSEC_CHROOT_SYSCTL
49202+ select GRKERNSEC_CHROOT_FINDTASK
49203+ select GRKERNSEC_SYSFS_RESTRICT
49204+ select GRKERNSEC_PROC
49205+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49206+ select GRKERNSEC_HIDESYM
49207+ select GRKERNSEC_BRUTE
49208+ select GRKERNSEC_PROC_USERGROUP
49209+ select GRKERNSEC_KMEM
49210+ select GRKERNSEC_RESLOG
49211+ select GRKERNSEC_RANDNET
49212+ select GRKERNSEC_PROC_ADD
49213+ select GRKERNSEC_CHROOT_CHMOD
49214+ select GRKERNSEC_CHROOT_NICE
49215+ select GRKERNSEC_SETXID if (X86 || SPARC64 || PPC || ARM || MIPS)
49216+ select GRKERNSEC_AUDIT_MOUNT
49217+ select GRKERNSEC_MODHARDEN if (MODULES)
49218+ select GRKERNSEC_HARDEN_PTRACE
49219+ select GRKERNSEC_PTRACE_READEXEC
49220+ select GRKERNSEC_VM86 if (X86_32)
49221+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
49222+ select PAX
49223+ select PAX_RANDUSTACK
49224+ select PAX_ASLR
49225+ select PAX_RANDMMAP
49226+ select PAX_NOEXEC
49227+ select PAX_MPROTECT
49228+ select PAX_EI_PAX
49229+ select PAX_PT_PAX_FLAGS
49230+ select PAX_HAVE_ACL_FLAGS
49231+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
49232+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
49233+ select PAX_RANDKSTACK if (X86_TSC && X86)
49234+ select PAX_SEGMEXEC if (X86_32)
49235+ select PAX_PAGEEXEC
49236+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
49237+ select PAX_EMUTRAMP if (PARISC)
49238+ select PAX_EMUSIGRT if (PARISC)
49239+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
49240+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
49241+ select PAX_REFCOUNT if (X86 || SPARC64)
49242+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
49243+ help
49244+ If you say Y here, many of the features of grsecurity will be
49245+ enabled, which will protect you against many kinds of attacks
49246+ against your system. The heightened security comes at a cost
49247+ of an increased chance of incompatibilities with rare software
49248+ on your machine. Since this security level enables PaX, you should
49249+ view <http://pax.grsecurity.net> and read about the PaX
49250+ project. While you are there, download chpax and run it on
49251+ binaries that cause problems with PaX. Also remember that
49252+ since the /proc restrictions are enabled, you must run your
49253+ identd as gid 1001. This security level enables the following
49254+ features in addition to those listed in the low and medium
49255+ security levels:
49256+
49257+ - Additional /proc restrictions
49258+ - Chmod restrictions in chroot
49259+ - No signals, ptrace, or viewing of processes outside of chroot
49260+ - Capability restrictions in chroot
49261+ - Deny fchdir out of chroot
49262+ - Priority restrictions in chroot
49263+ - Segmentation-based implementation of PaX
49264+ - Mprotect restrictions
49265+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
49266+ - Kernel stack randomization
49267+ - Mount/unmount/remount logging
49268+ - Kernel symbol hiding
49269+ - Hardening of module auto-loading
49270+ - Ptrace restrictions
49271+ - Restricted vm86 mode
49272+ - Restricted sysfs/debugfs
49273+ - Active kernel exploit response
49274+
49275+config GRKERNSEC_CUSTOM
49276+ bool "Custom"
49277+ help
49278+ If you say Y here, you will be able to configure every grsecurity
49279+ option, which allows you to enable many more features that aren't
49280+ covered in the basic security levels. These additional features
49281+ include TPE, socket restrictions, and the sysctl system for
49282+ grsecurity. It is advised that you read through the help for
49283+ each option to determine its usefulness in your situation.
49284+
49285+endchoice
49286+
49287+menu "Memory Protections"
49288+depends on GRKERNSEC
49289+
49290+config GRKERNSEC_KMEM
49291+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
49292+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
49293+ help
49294+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
49295+ be written to or read from to modify or leak the contents of the running
49296+ kernel. /dev/port will also not be allowed to be opened. If you have module
49297+ support disabled, enabling this will close up four ways that are
49298+ currently used to insert malicious code into the running kernel.
49299+ Even with all these features enabled, we still highly recommend that
49300+ you use the RBAC system, as it is still possible for an attacker to
49301+ modify the running kernel through privileged I/O granted by ioperm/iopl.
49302+ If you are not using XFree86, you may be able to stop this additional
49303+ case by enabling the 'Disable privileged I/O' option. Though nothing
49304+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
49305+ but only to video memory, which is the only writing we allow in this
49306+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
49307+ not be allowed to mprotect it with PROT_WRITE later.
49308+ It is highly recommended that you say Y here if you meet all the
49309+ conditions above.
49310+
49311+config GRKERNSEC_VM86
49312+ bool "Restrict VM86 mode"
49313+ depends on X86_32
49314+
49315+ help
49316+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
49317+ make use of a special execution mode on 32bit x86 processors called
49318+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
49319+ video cards and will still work with this option enabled. The purpose
49320+ of the option is to prevent exploitation of emulation errors in
49321+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
49322+ Nearly all users should be able to enable this option.
49323+
49324+config GRKERNSEC_IO
49325+ bool "Disable privileged I/O"
49326+ depends on X86
49327+ select RTC_CLASS
49328+ select RTC_INTF_DEV
49329+ select RTC_DRV_CMOS
49330+
49331+ help
49332+ If you say Y here, all ioperm and iopl calls will return an error.
49333+ Ioperm and iopl can be used to modify the running kernel.
49334+ Unfortunately, some programs need this access to operate properly,
49335+ the most notable of which are XFree86 and hwclock. hwclock can be
49336+ remedied by having RTC support in the kernel, so real-time
49337+ clock support is enabled if this option is enabled, to ensure
49338+ that hwclock operates correctly. XFree86 still will not
49339+ operate correctly with this option enabled, so DO NOT CHOOSE Y
49340+ IF YOU USE XFree86. If you use XFree86 and you still want to
49341+ protect your kernel against modification, use the RBAC system.
49342+
49343+config GRKERNSEC_PROC_MEMMAP
49344+ bool "Harden ASLR against information leaks and entropy reduction"
49345+ default y if (PAX_NOEXEC || PAX_ASLR)
49346+ depends on PAX_NOEXEC || PAX_ASLR
49347+ help
49348+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
49349+ give no information about the addresses of its mappings if
49350+ PaX features that rely on random addresses are enabled on the task.
49351+ In addition to sanitizing this information and disabling other
49352+	dangerous sources of information, this option restricts reads of sensitive
49353+ /proc/<pid> entries where the file descriptor was opened in a different
49354+ task than the one performing the read. Such attempts are logged.
49355+ This option also limits argv/env strings for suid/sgid binaries
49356+ to 512KB to prevent a complete exhaustion of the stack entropy provided
49357+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
49358+ binaries to prevent alternative mmap layouts from being abused.
49359+
49360+ If you use PaX it is essential that you say Y here as it closes up
49361+ several holes that make full ASLR useless locally.
49362+
49363+config GRKERNSEC_BRUTE
49364+ bool "Deter exploit bruteforcing"
49365+ help
49366+ If you say Y here, attempts to bruteforce exploits against forking
49367+ daemons such as apache or sshd, as well as against suid/sgid binaries
49368+ will be deterred. When a child of a forking daemon is killed by PaX
49369+ or crashes due to an illegal instruction or other suspicious signal,
49370+ the parent process will be delayed 30 seconds upon every subsequent
49371+ fork until the administrator is able to assess the situation and
49372+ restart the daemon.
49373+ In the suid/sgid case, the attempt is logged, the user has all their
49374+ processes terminated, and they are prevented from executing any further
49375+ processes for 15 minutes.
49376+ It is recommended that you also enable signal logging in the auditing
49377+ section so that logs are generated when a process triggers a suspicious
49378+ signal.
49379+ If the sysctl option is enabled, a sysctl option with name
49380+ "deter_bruteforce" is created.
49381+
49382+
49383+config GRKERNSEC_MODHARDEN
49384+ bool "Harden module auto-loading"
49385+ depends on MODULES
49386+ help
49387+ If you say Y here, module auto-loading in response to use of some
49388+ feature implemented by an unloaded module will be restricted to
49389+ root users. Enabling this option helps defend against attacks
49390+ by unprivileged users who abuse the auto-loading behavior to
49391+ cause a vulnerable module to load that is then exploited.
49392+
49393+ If this option prevents a legitimate use of auto-loading for a
49394+ non-root user, the administrator can execute modprobe manually
49395+ with the exact name of the module mentioned in the alert log.
49396+ Alternatively, the administrator can add the module to the list
49397+ of modules loaded at boot by modifying init scripts.
49398+
49399+ Modification of init scripts will most likely be needed on
49400+ Ubuntu servers with encrypted home directory support enabled,
49401+ as the first non-root user logging in will cause the ecb(aes),
49402+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
49403+
49404+config GRKERNSEC_HIDESYM
49405+ bool "Hide kernel symbols"
49406+ help
49407+ If you say Y here, getting information on loaded modules, and
49408+ displaying all kernel symbols through a syscall will be restricted
49409+ to users with CAP_SYS_MODULE. For software compatibility reasons,
49410+ /proc/kallsyms will be restricted to the root user. The RBAC
49411+ system can hide that entry even from root.
49412+
49413+ This option also prevents leaking of kernel addresses through
49414+ several /proc entries.
49415+
49416+ Note that this option is only effective provided the following
49417+ conditions are met:
49418+ 1) The kernel using grsecurity is not precompiled by some distribution
49419+ 2) You have also enabled GRKERNSEC_DMESG
49420+ 3) You are using the RBAC system and hiding other files such as your
49421+ kernel image and System.map. Alternatively, enabling this option
49422+ causes the permissions on /boot, /lib/modules, and the kernel
49423+ source directory to change at compile time to prevent
49424+ reading by non-root users.
49425+ If the above conditions are met, this option will aid in providing a
49426+ useful protection against local kernel exploitation of overflows
49427+ and arbitrary read/write vulnerabilities.
49428+
49429+config GRKERNSEC_KERN_LOCKOUT
49430+ bool "Active kernel exploit response"
49431+ depends on X86 || ARM || PPC || SPARC
49432+ help
49433+ If you say Y here, when a PaX alert is triggered due to suspicious
49434+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
49435+ or an OOPs occurs due to bad memory accesses, instead of just
49436+ terminating the offending process (and potentially allowing
49437+ a subsequent exploit from the same user), we will take one of two
49438+ actions:
49439+ If the user was root, we will panic the system
49440+ If the user was non-root, we will log the attempt, terminate
49441+ all processes owned by the user, then prevent them from creating
49442+ any new processes until the system is restarted
49443+ This deters repeated kernel exploitation/bruteforcing attempts
49444+ and is useful for later forensics.
49445+
49446+endmenu
49447+menu "Role Based Access Control Options"
49448+depends on GRKERNSEC
49449+
49450+config GRKERNSEC_RBAC_DEBUG
49451+ bool
49452+
49453+config GRKERNSEC_NO_RBAC
49454+ bool "Disable RBAC system"
49455+ help
49456+ If you say Y here, the /dev/grsec device will be removed from the kernel,
49457+ preventing the RBAC system from being enabled. You should only say Y
49458+ here if you have no intention of using the RBAC system, so as to prevent
49459+ an attacker with root access from misusing the RBAC system to hide files
49460+ and processes when loadable module support and /dev/[k]mem have been
49461+ locked down.
49462+
49463+config GRKERNSEC_ACL_HIDEKERN
49464+ bool "Hide kernel processes"
49465+ help
49466+ If you say Y here, all kernel threads will be hidden to all
49467+ processes but those whose subject has the "view hidden processes"
49468+ flag.
49469+
49470+config GRKERNSEC_ACL_MAXTRIES
49471+ int "Maximum tries before password lockout"
49472+ default 3
49473+ help
49474+ This option enforces the maximum number of times a user can attempt
49475+ to authorize themselves with the grsecurity RBAC system before being
49476+ denied the ability to attempt authorization again for a specified time.
49477+ The lower the number, the harder it will be to brute-force a password.
49478+
49479+config GRKERNSEC_ACL_TIMEOUT
49480+ int "Time to wait after max password tries, in seconds"
49481+ default 30
49482+ help
49483+ This option specifies the time the user must wait after attempting to
49484+ authorize to the RBAC system with the maximum number of invalid
49485+ passwords. The higher the number, the harder it will be to brute-force
49486+ a password.
49487+
49488+endmenu
49489+menu "Filesystem Protections"
49490+depends on GRKERNSEC
49491+
49492+config GRKERNSEC_PROC
49493+ bool "Proc restrictions"
49494+ help
49495+ If you say Y here, the permissions of the /proc filesystem
49496+ will be altered to enhance system security and privacy. You MUST
49497+ choose either a user only restriction or a user and group restriction.
49498+ Depending upon the option you choose, you can either restrict users to
49499+ see only the processes they themselves run, or choose a group that can
49500+ view all processes and files normally restricted to root if you choose
49501+ the "restrict to user only" option. NOTE: If you're running identd or
49502+ ntpd as a non-root user, you will have to run it as the group you
49503+ specify here.
49504+
49505+config GRKERNSEC_PROC_USER
49506+ bool "Restrict /proc to user only"
49507+ depends on GRKERNSEC_PROC
49508+ help
49509+ If you say Y here, non-root users will only be able to view their own
49510+ processes, and restricts them from viewing network-related information,
49511+ and viewing kernel symbol and module information.
49512+
49513+config GRKERNSEC_PROC_USERGROUP
49514+ bool "Allow special group"
49515+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
49516+ help
49517+ If you say Y here, you will be able to select a group that will be
49518+ able to view all processes and network-related information. If you've
49519+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
49520+ remain hidden. This option is useful if you want to run identd as
49521+ a non-root user.
49522+
49523+config GRKERNSEC_PROC_GID
49524+ int "GID for special group"
49525+ depends on GRKERNSEC_PROC_USERGROUP
49526+ default 1001
49527+
49528+config GRKERNSEC_PROC_ADD
49529+ bool "Additional restrictions"
49530+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
49531+ help
49532+ If you say Y here, additional restrictions will be placed on
49533+ /proc that keep normal users from viewing device information and
49534+ slabinfo information that could be useful for exploits.
49535+
49536+config GRKERNSEC_LINK
49537+ bool "Linking restrictions"
49538+ help
49539+ If you say Y here, /tmp race exploits will be prevented, since users
49540+ will no longer be able to follow symlinks owned by other users in
49541+ world-writable +t directories (e.g. /tmp), unless the owner of the
49542+	symlink is the owner of the directory. Users will also not be
49543+ able to hardlink to files they do not own. If the sysctl option is
49544+ enabled, a sysctl option with name "linking_restrictions" is created.
49545+
49546+config GRKERNSEC_FIFO
49547+ bool "FIFO restrictions"
49548+ help
49549+ If you say Y here, users will not be able to write to FIFOs they don't
49550+ own in world-writable +t directories (e.g. /tmp), unless the owner of
49551+ the FIFO is the same owner of the directory it's held in. If the sysctl
49552+ option is enabled, a sysctl option with name "fifo_restrictions" is
49553+ created.
49554+
49555+config GRKERNSEC_SYSFS_RESTRICT
49556+ bool "Sysfs/debugfs restriction"
49557+ depends on SYSFS
49558+ help
49559+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
49560+ any filesystem normally mounted under it (e.g. debugfs) will be
49561+ mostly accessible only by root. These filesystems generally provide access
49562+ to hardware and debug information that isn't appropriate for unprivileged
49563+ users of the system. Sysfs and debugfs have also become a large source
49564+ of new vulnerabilities, ranging from infoleaks to local compromise.
49565+ There has been very little oversight with an eye toward security involved
49566+ in adding new exporters of information to these filesystems, so their
49567+ use is discouraged.
49568+ For reasons of compatibility, a few directories have been whitelisted
49569+ for access by non-root users:
49570+ /sys/fs/selinux
49571+ /sys/fs/fuse
49572+ /sys/devices/system/cpu
49573+
49574+config GRKERNSEC_ROFS
49575+ bool "Runtime read-only mount protection"
49576+ help
49577+ If you say Y here, a sysctl option with name "romount_protect" will
49578+ be created. By setting this option to 1 at runtime, filesystems
49579+ will be protected in the following ways:
49580+ * No new writable mounts will be allowed
49581+ * Existing read-only mounts won't be able to be remounted read/write
49582+ * Write operations will be denied on all block devices
49583+ This option acts independently of grsec_lock: once it is set to 1,
49584+ it cannot be turned off. Therefore, please be mindful of the resulting
49585+ behavior if this option is enabled in an init script on a read-only
49586+ filesystem. This feature is mainly intended for secure embedded systems.
49587+
49588+config GRKERNSEC_CHROOT
49589+ bool "Chroot jail restrictions"
49590+ help
49591+ If you say Y here, you will be able to choose several options that will
49592+ make breaking out of a chrooted jail much more difficult. If you
49593+ encounter no software incompatibilities with the following options, it
49594+ is recommended that you enable each one.
49595+
49596+config GRKERNSEC_CHROOT_MOUNT
49597+ bool "Deny mounts"
49598+ depends on GRKERNSEC_CHROOT
49599+ help
49600+ If you say Y here, processes inside a chroot will not be able to
49601+ mount or remount filesystems. If the sysctl option is enabled, a
49602+ sysctl option with name "chroot_deny_mount" is created.
49603+
49604+config GRKERNSEC_CHROOT_DOUBLE
49605+ bool "Deny double-chroots"
49606+ depends on GRKERNSEC_CHROOT
49607+ help
49608+ If you say Y here, processes inside a chroot will not be able to chroot
49609+ again outside the chroot. This is a widely used method of breaking
49610+ out of a chroot jail and should not be allowed. If the sysctl
49611+ option is enabled, a sysctl option with name
49612+ "chroot_deny_chroot" is created.
49613+
49614+config GRKERNSEC_CHROOT_PIVOT
49615+ bool "Deny pivot_root in chroot"
49616+ depends on GRKERNSEC_CHROOT
49617+ help
49618+ If you say Y here, processes inside a chroot will not be able to use
49619+ a function called pivot_root() that was introduced in Linux 2.3.41. It
49620+ works similar to chroot in that it changes the root filesystem. This
49621+ function could be misused in a chrooted process to attempt to break out
49622+ of the chroot, and therefore should not be allowed. If the sysctl
49623+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
49624+ created.
49625+
49626+config GRKERNSEC_CHROOT_CHDIR
49627+ bool "Enforce chdir(\"/\") on all chroots"
49628+ depends on GRKERNSEC_CHROOT
49629+ help
49630+ If you say Y here, the current working directory of all newly-chrooted
49631+	applications will be set to the root directory of the chroot.
49632+ The man page on chroot(2) states:
49633+ Note that this call does not change the current working
49634+ directory, so that `.' can be outside the tree rooted at
49635+ `/'. In particular, the super-user can escape from a
49636+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
49637+
49638+ It is recommended that you say Y here, since it's not known to break
49639+ any software. If the sysctl option is enabled, a sysctl option with
49640+ name "chroot_enforce_chdir" is created.
49641+
49642+config GRKERNSEC_CHROOT_CHMOD
49643+ bool "Deny (f)chmod +s"
49644+ depends on GRKERNSEC_CHROOT
49645+ help
49646+ If you say Y here, processes inside a chroot will not be able to chmod
49647+ or fchmod files to make them have suid or sgid bits. This protects
49648+ against another published method of breaking a chroot. If the sysctl
49649+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
49650+ created.
49651+
49652+config GRKERNSEC_CHROOT_FCHDIR
49653+ bool "Deny fchdir out of chroot"
49654+ depends on GRKERNSEC_CHROOT
49655+ help
49656+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
49657+ to a file descriptor of the chrooting process that points to a directory
49658+ outside the filesystem will be stopped. If the sysctl option
49659+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
49660+
49661+config GRKERNSEC_CHROOT_MKNOD
49662+ bool "Deny mknod"
49663+ depends on GRKERNSEC_CHROOT
49664+ help
49665+ If you say Y here, processes inside a chroot will not be allowed to
49666+ mknod. The problem with using mknod inside a chroot is that it
49667+ would allow an attacker to create a device entry that is the same
49668+ as one on the physical root of your system, which could range from
49669+ anything from the console device to a device for your harddrive (which
49670+ they could then use to wipe the drive or steal data). It is recommended
49671+ that you say Y here, unless you run into software incompatibilities.
49672+ If the sysctl option is enabled, a sysctl option with name
49673+ "chroot_deny_mknod" is created.
49674+
49675+config GRKERNSEC_CHROOT_SHMAT
49676+ bool "Deny shmat() out of chroot"
49677+ depends on GRKERNSEC_CHROOT
49678+ help
49679+ If you say Y here, processes inside a chroot will not be able to attach
49680+ to shared memory segments that were created outside of the chroot jail.
49681+ It is recommended that you say Y here. If the sysctl option is enabled,
49682+ a sysctl option with name "chroot_deny_shmat" is created.
49683+
49684+config GRKERNSEC_CHROOT_UNIX
49685+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
49686+ depends on GRKERNSEC_CHROOT
49687+ help
49688+ If you say Y here, processes inside a chroot will not be able to
49689+ connect to abstract (meaning not belonging to a filesystem) Unix
49690+ domain sockets that were bound outside of a chroot. It is recommended
49691+ that you say Y here. If the sysctl option is enabled, a sysctl option
49692+ with name "chroot_deny_unix" is created.
49693+
49694+config GRKERNSEC_CHROOT_FINDTASK
49695+ bool "Protect outside processes"
49696+ depends on GRKERNSEC_CHROOT
49697+ help
49698+ If you say Y here, processes inside a chroot will not be able to
49699+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
49700+ getsid, or view any process outside of the chroot. If the sysctl
49701+ option is enabled, a sysctl option with name "chroot_findtask" is
49702+ created.
49703+
49704+config GRKERNSEC_CHROOT_NICE
49705+ bool "Restrict priority changes"
49706+ depends on GRKERNSEC_CHROOT
49707+ help
49708+ If you say Y here, processes inside a chroot will not be able to raise
49709+ the priority of processes in the chroot, or alter the priority of
49710+ processes outside the chroot. This provides more security than simply
49711+ removing CAP_SYS_NICE from the process' capability set. If the
49712+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
49713+ is created.
49714+
49715+config GRKERNSEC_CHROOT_SYSCTL
49716+ bool "Deny sysctl writes"
49717+ depends on GRKERNSEC_CHROOT
49718+ help
49719+ If you say Y here, an attacker in a chroot will not be able to
49720+ write to sysctl entries, either by sysctl(2) or through a /proc
49721+ interface. It is strongly recommended that you say Y here. If the
49722+ sysctl option is enabled, a sysctl option with name
49723+ "chroot_deny_sysctl" is created.
49724+
49725+config GRKERNSEC_CHROOT_CAPS
49726+ bool "Capability restrictions"
49727+ depends on GRKERNSEC_CHROOT
49728+ help
49729+ If you say Y here, the capabilities on all processes within a
49730+ chroot jail will be lowered to stop module insertion, raw i/o,
49731+ system and net admin tasks, rebooting the system, modifying immutable
49732+ files, modifying IPC owned by another, and changing the system time.
49733+ This is left an option because it can break some apps. Disable this
49734+ if your chrooted apps are having problems performing those kinds of
49735+ tasks. If the sysctl option is enabled, a sysctl option with
49736+ name "chroot_caps" is created.
49737+
49738+endmenu
49739+menu "Kernel Auditing"
49740+depends on GRKERNSEC
49741+
49742+config GRKERNSEC_AUDIT_GROUP
49743+ bool "Single group for auditing"
49744+ help
49745+ If you say Y here, the exec, chdir, and (un)mount logging features
49746+ will only operate on a group you specify. This option is recommended
49747+ if you only want to watch certain users instead of having a large
49748+ amount of logs from the entire system. If the sysctl option is enabled,
49749+ a sysctl option with name "audit_group" is created.
49750+
49751+config GRKERNSEC_AUDIT_GID
49752+ int "GID for auditing"
49753+ depends on GRKERNSEC_AUDIT_GROUP
49754+ default 1007
49755+
49756+config GRKERNSEC_EXECLOG
49757+ bool "Exec logging"
49758+ help
49759+ If you say Y here, all execve() calls will be logged (since the
49760+ other exec*() calls are frontends to execve(), all execution
49761+ will be logged). Useful for shell-servers that like to keep track
49762+ of their users. If the sysctl option is enabled, a sysctl option with
49763+ name "exec_logging" is created.
49764+ WARNING: This option when enabled will produce a LOT of logs, especially
49765+ on an active system.
49766+
49767+config GRKERNSEC_RESLOG
49768+ bool "Resource logging"
49769+ help
49770+ If you say Y here, all attempts to overstep resource limits will
49771+ be logged with the resource name, the requested size, and the current
49772+ limit. It is highly recommended that you say Y here. If the sysctl
49773+ option is enabled, a sysctl option with name "resource_logging" is
49774+ created. If the RBAC system is enabled, the sysctl value is ignored.
49775+
49776+config GRKERNSEC_CHROOT_EXECLOG
49777+ bool "Log execs within chroot"
49778+ help
49779+ If you say Y here, all executions inside a chroot jail will be logged
49780+ to syslog. This can cause a large amount of logs if certain
49781+ applications (eg. djb's daemontools) are installed on the system, and
49782+ is therefore left as an option. If the sysctl option is enabled, a
49783+ sysctl option with name "chroot_execlog" is created.
49784+
49785+config GRKERNSEC_AUDIT_PTRACE
49786+ bool "Ptrace logging"
49787+ help
49788+ If you say Y here, all attempts to attach to a process via ptrace
49789+ will be logged. If the sysctl option is enabled, a sysctl option
49790+ with name "audit_ptrace" is created.
49791+
49792+config GRKERNSEC_AUDIT_CHDIR
49793+ bool "Chdir logging"
49794+ help
49795+ If you say Y here, all chdir() calls will be logged. If the sysctl
49796+ option is enabled, a sysctl option with name "audit_chdir" is created.
49797+
49798+config GRKERNSEC_AUDIT_MOUNT
49799+ bool "(Un)Mount logging"
49800+ help
49801+ If you say Y here, all mounts and unmounts will be logged. If the
49802+ sysctl option is enabled, a sysctl option with name "audit_mount" is
49803+ created.
49804+
49805+config GRKERNSEC_SIGNAL
49806+ bool "Signal logging"
49807+ help
49808+ If you say Y here, certain important signals will be logged, such as
49809+	SIGSEGV, which will as a result inform you of when an error in a program
49810+ occurred, which in some cases could mean a possible exploit attempt.
49811+ If the sysctl option is enabled, a sysctl option with name
49812+ "signal_logging" is created.
49813+
49814+config GRKERNSEC_FORKFAIL
49815+ bool "Fork failure logging"
49816+ help
49817+ If you say Y here, all failed fork() attempts will be logged.
49818+ This could suggest a fork bomb, or someone attempting to overstep
49819+ their process limit. If the sysctl option is enabled, a sysctl option
49820+ with name "forkfail_logging" is created.
49821+
49822+config GRKERNSEC_TIME
49823+ bool "Time change logging"
49824+ help
49825+ If you say Y here, any changes of the system clock will be logged.
49826+ If the sysctl option is enabled, a sysctl option with name
49827+ "timechange_logging" is created.
49828+
49829+config GRKERNSEC_PROC_IPADDR
49830+ bool "/proc/<pid>/ipaddr support"
49831+ help
49832+ If you say Y here, a new entry will be added to each /proc/<pid>
49833+ directory that contains the IP address of the person using the task.
49834+ The IP is carried across local TCP and AF_UNIX stream sockets.
49835+ This information can be useful for IDS/IPSes to perform remote response
49836+ to a local attack. The entry is readable by only the owner of the
49837+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
49838+ the RBAC system), and thus does not create privacy concerns.
49839+
49840+config GRKERNSEC_RWXMAP_LOG
49841+ bool 'Denied RWX mmap/mprotect logging'
49842+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
49843+ help
49844+ If you say Y here, calls to mmap() and mprotect() with explicit
49845+ usage of PROT_WRITE and PROT_EXEC together will be logged when
49846+ denied by the PAX_MPROTECT feature. If the sysctl option is
49847+ enabled, a sysctl option with name "rwxmap_logging" is created.
49848+
49849+config GRKERNSEC_AUDIT_TEXTREL
49850+ bool 'ELF text relocations logging (READ HELP)'
49851+ depends on PAX_MPROTECT
49852+ help
49853+ If you say Y here, text relocations will be logged with the filename
49854+ of the offending library or binary. The purpose of the feature is
49855+ to help Linux distribution developers get rid of libraries and
49856+ binaries that need text relocations which hinder the future progress
49857+ of PaX. Only Linux distribution developers should say Y here, and
49858+ never on a production machine, as this option creates an information
49859+ leak that could aid an attacker in defeating the randomization of
49860+ a single memory region. If the sysctl option is enabled, a sysctl
49861+ option with name "audit_textrel" is created.
49862+
49863+endmenu
49864+
49865+menu "Executable Protections"
49866+depends on GRKERNSEC
49867+
49868+config GRKERNSEC_DMESG
49869+ bool "Dmesg(8) restriction"
49870+ help
49871+ If you say Y here, non-root users will not be able to use dmesg(8)
49872+ to view up to the last 4kb of messages in the kernel's log buffer.
49873+ The kernel's log buffer often contains kernel addresses and other
49874+ identifying information useful to an attacker in fingerprinting a
49875+ system for a targeted exploit.
49876+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
49877+ created.
49878+
49879+config GRKERNSEC_HARDEN_PTRACE
49880+ bool "Deter ptrace-based process snooping"
49881+ help
49882+ If you say Y here, TTY sniffers and other malicious monitoring
49883+ programs implemented through ptrace will be defeated. If you
49884+ have been using the RBAC system, this option has already been
49885+ enabled for several years for all users, with the ability to make
49886+ fine-grained exceptions.
49887+
49888+ This option only affects the ability of non-root users to ptrace
49889+ processes that are not a descendent of the ptracing process.
49890+ This means that strace ./binary and gdb ./binary will still work,
49891+ but attaching to arbitrary processes will not. If the sysctl
49892+ option is enabled, a sysctl option with name "harden_ptrace" is
49893+ created.
49894+
49895+config GRKERNSEC_PTRACE_READEXEC
49896+ bool "Require read access to ptrace sensitive binaries"
49897+ help
49898+ If you say Y here, unprivileged users will not be able to ptrace unreadable
49899+ binaries. This option is useful in environments that
49900+ remove the read bits (e.g. file mode 4711) from suid binaries to
49901+ prevent infoleaking of their contents. This option adds
49902+ consistency to the use of that file mode, as the binary could normally
49903+ be read out when run without privileges while ptracing.
49904+
49905+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
49906+ is created.
49907+
49908+config GRKERNSEC_SETXID
49909+ bool "Enforce consistent multithreaded privileges"
49910+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
49911+ help
49912+ If you say Y here, a change from a root uid to a non-root uid
49913+ in a multithreaded application will cause the resulting uids,
49914+ gids, supplementary groups, and capabilities in that thread
49915+ to be propagated to the other threads of the process. In most
49916+ cases this is unnecessary, as glibc will emulate this behavior
49917+ on behalf of the application. Other libcs do not act in the
49918+ same way, allowing the other threads of the process to continue
49919+ running with root privileges. If the sysctl option is enabled,
49920+ a sysctl option with name "consistent_setxid" is created.
49921+
49922+config GRKERNSEC_TPE
49923+ bool "Trusted Path Execution (TPE)"
49924+ help
49925+ If you say Y here, you will be able to choose a gid to add to the
49926+ supplementary groups of users you want to mark as "untrusted."
49927+ These users will not be able to execute any files that are not in
49928+ root-owned directories writable only by root. If the sysctl option
49929+ is enabled, a sysctl option with name "tpe" is created.
49930+
49931+config GRKERNSEC_TPE_ALL
49932+ bool "Partially restrict all non-root users"
49933+ depends on GRKERNSEC_TPE
49934+ help
49935+ If you say Y here, all non-root users will be covered under
49936+ a weaker TPE restriction. This is separate from, and in addition to,
49937+ the main TPE options that you have selected elsewhere. Thus, if a
49938+ "trusted" GID is chosen, this restriction applies to even that GID.
49939+ Under this restriction, all non-root users will only be allowed to
49940+ execute files in directories they own that are not group or
49941+ world-writable, or in directories owned by root and writable only by
49942+ root. If the sysctl option is enabled, a sysctl option with name
49943+ "tpe_restrict_all" is created.
49944+
49945+config GRKERNSEC_TPE_INVERT
49946+ bool "Invert GID option"
49947+ depends on GRKERNSEC_TPE
49948+ help
49949+ If you say Y here, the group you specify in the TPE configuration will
49950+ decide what group TPE restrictions will be *disabled* for. This
49951+ option is useful if you want TPE restrictions to be applied to most
49952+ users on the system. If the sysctl option is enabled, a sysctl option
49953+ with name "tpe_invert" is created. Unlike other sysctl options, this
49954+ entry will default to on for backward-compatibility.
49955+
49956+config GRKERNSEC_TPE_GID
49957+ int "GID for untrusted users"
49958+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
49959+ default 1005
49960+ help
49961+ Setting this GID determines what group TPE restrictions will be
49962+ *enabled* for. If the sysctl option is enabled, a sysctl option
49963+ with name "tpe_gid" is created.
49964+
49965+config GRKERNSEC_TPE_GID
49966+ int "GID for trusted users"
49967+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
49968+ default 1005
49969+ help
49970+ Setting this GID determines what group TPE restrictions will be
49971+ *disabled* for. If the sysctl option is enabled, a sysctl option
49972+ with name "tpe_gid" is created.
49973+
49974+endmenu
49975+menu "Network Protections"
49976+depends on GRKERNSEC
49977+
49978+config GRKERNSEC_RANDNET
49979+ bool "Larger entropy pools"
49980+ help
49981+ If you say Y here, the entropy pools used for many features of Linux
49982+ and grsecurity will be doubled in size. Since several grsecurity
49983+ features use additional randomness, it is recommended that you say Y
49984+ here. Saying Y here has a similar effect as modifying
49985+ /proc/sys/kernel/random/poolsize.
49986+
49987+config GRKERNSEC_BLACKHOLE
49988+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
49989+ depends on NET
49990+ help
49991+ If you say Y here, neither TCP resets nor ICMP
49992+ destination-unreachable packets will be sent in response to packets
49993+ sent to ports for which no associated listening process exists.
49994+ This feature supports both IPV4 and IPV6 and exempts the
49995+ loopback interface from blackholing. Enabling this feature
49996+ makes a host more resilient to DoS attacks and reduces network
49997+ visibility against scanners.
49998+
49999+ The blackhole feature as-implemented is equivalent to the FreeBSD
50000+ blackhole feature, as it prevents RST responses to all packets, not
50001+ just SYNs. Under most application behavior this causes no
50002+ problems, but applications (like haproxy) may not close certain
50003+ connections in a way that cleanly terminates them on the remote
50004+ end, leaving the remote host in LAST_ACK state. Because of this
50005+ side-effect and to prevent intentional LAST_ACK DoSes, this
50006+ feature also adds automatic mitigation against such attacks.
50007+ The mitigation drastically reduces the amount of time a socket
50008+ can spend in LAST_ACK state. If you're using haproxy and not
50009+ all servers it connects to have this option enabled, consider
50010+ disabling this feature on the haproxy host.
50011+
50012+ If the sysctl option is enabled, two sysctl options with names
50013+ "ip_blackhole" and "lastack_retries" will be created.
50014+ While "ip_blackhole" takes the standard zero/non-zero on/off
50015+ toggle, "lastack_retries" uses the same kinds of values as
50016+ "tcp_retries1" and "tcp_retries2". The default value of 4
50017+ prevents a socket from lasting more than 45 seconds in LAST_ACK
50018+ state.
50019+
50020+config GRKERNSEC_SOCKET
50021+ bool "Socket restrictions"
50022+ depends on NET
50023+ help
50024+ If you say Y here, you will be able to choose from several options.
50025+ If you assign a GID on your system and add it to the supplementary
50026+ groups of users you want to restrict socket access to, this patch
50027+ will perform up to three things, based on the option(s) you choose.
50028+
50029+config GRKERNSEC_SOCKET_ALL
50030+ bool "Deny any sockets to group"
50031+ depends on GRKERNSEC_SOCKET
50032+ help
50033+ If you say Y here, you will be able to choose a GID whose users will
50034+ be unable to connect to other hosts from your machine or run server
50035+ applications from your machine. If the sysctl option is enabled, a
50036+ sysctl option with name "socket_all" is created.
50037+
50038+config GRKERNSEC_SOCKET_ALL_GID
50039+ int "GID to deny all sockets for"
50040+ depends on GRKERNSEC_SOCKET_ALL
50041+ default 1004
50042+ help
50043+ Here you can choose the GID to disable socket access for. Remember to
50044+ add the users you want socket access disabled for to the GID
50045+ specified here. If the sysctl option is enabled, a sysctl option
50046+ with name "socket_all_gid" is created.
50047+
50048+config GRKERNSEC_SOCKET_CLIENT
50049+ bool "Deny client sockets to group"
50050+ depends on GRKERNSEC_SOCKET
50051+ help
50052+ If you say Y here, you will be able to choose a GID whose users will
50053+ be unable to connect to other hosts from your machine, but will be
50054+ able to run servers. If this option is enabled, all users in the group
50055+ you specify will have to use passive mode when initiating ftp transfers
50056+ from the shell on your machine. If the sysctl option is enabled, a
50057+ sysctl option with name "socket_client" is created.
50058+
50059+config GRKERNSEC_SOCKET_CLIENT_GID
50060+ int "GID to deny client sockets for"
50061+ depends on GRKERNSEC_SOCKET_CLIENT
50062+ default 1003
50063+ help
50064+ Here you can choose the GID to disable client socket access for.
50065+ Remember to add the users you want client socket access disabled for to
50066+ the GID specified here. If the sysctl option is enabled, a sysctl
50067+ option with name "socket_client_gid" is created.
50068+
50069+config GRKERNSEC_SOCKET_SERVER
50070+ bool "Deny server sockets to group"
50071+ depends on GRKERNSEC_SOCKET
50072+ help
50073+ If you say Y here, you will be able to choose a GID whose users will
50074+ be unable to run server applications from your machine. If the sysctl
50075+ option is enabled, a sysctl option with name "socket_server" is created.
50076+
50077+config GRKERNSEC_SOCKET_SERVER_GID
50078+ int "GID to deny server sockets for"
50079+ depends on GRKERNSEC_SOCKET_SERVER
50080+ default 1002
50081+ help
50082+ Here you can choose the GID to disable server socket access for.
50083+ Remember to add the users you want server socket access disabled for to
50084+ the GID specified here. If the sysctl option is enabled, a sysctl
50085+ option with name "socket_server_gid" is created.
50086+
50087+endmenu
50088+menu "Sysctl support"
50089+depends on GRKERNSEC && SYSCTL
50090+
50091+config GRKERNSEC_SYSCTL
50092+ bool "Sysctl support"
50093+ help
50094+ If you say Y here, you will be able to change the options that
50095+ grsecurity runs with at bootup, without having to recompile your
50096+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
50097+ to enable (1) or disable (0) various features. All the sysctl entries
50098+ are mutable until the "grsec_lock" entry is set to a non-zero value.
50099+ All features enabled in the kernel configuration are disabled at boot
50100+ if you do not say Y to the "Turn on features by default" option.
50101+ All options should be set at startup, and the grsec_lock entry should
50102+ be set to a non-zero value after all the options are set.
50103+ *THIS IS EXTREMELY IMPORTANT*
50104+
50105+config GRKERNSEC_SYSCTL_DISTRO
50106+ bool "Extra sysctl support for distro makers (READ HELP)"
50107+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
50108+ help
50109+ If you say Y here, additional sysctl options will be created
50110+ for features that affect processes running as root. Therefore,
50111+ it is critical when using this option that the grsec_lock entry be
50112+ enabled after boot. Only distros with prebuilt kernel packages
50113+ with this option enabled that can ensure grsec_lock is enabled
50114+ after boot should use this option.
50115+ *Failure to set grsec_lock after boot makes all grsec features
50116+ this option covers useless*
50117+
50118+ Currently this option creates the following sysctl entries:
50119+ "Disable Privileged I/O": "disable_priv_io"
50120+
50121+config GRKERNSEC_SYSCTL_ON
50122+ bool "Turn on features by default"
50123+ depends on GRKERNSEC_SYSCTL
50124+ help
50125+ If you say Y here, instead of having all features enabled in the
50126+ kernel configuration disabled at boot time, the features will be
50127+ enabled at boot time. It is recommended you say Y here unless
50128+ there is some reason you would want all sysctl-tunable features to
50129+ be disabled by default. As mentioned elsewhere, it is important
50130+ to enable the grsec_lock entry once you have finished modifying
50131+ the sysctl entries.
50132+
50133+endmenu
50134+menu "Logging Options"
50135+depends on GRKERNSEC
50136+
50137+config GRKERNSEC_FLOODTIME
50138+ int "Seconds in between log messages (minimum)"
50139+ default 10
50140+ help
50141+ This option allows you to enforce the number of seconds between
50142+ grsecurity log messages. The default should be suitable for most
50143+ people, however, if you choose to change it, choose a value small enough
50144+ to allow informative logs to be produced, but large enough to
50145+ prevent flooding.
50146+
50147+config GRKERNSEC_FLOODBURST
50148+ int "Number of messages in a burst (maximum)"
50149+ default 6
50150+ help
50151+ This option allows you to choose the maximum number of messages allowed
50152+ within the flood time interval you chose in a separate option. The
50153+ default should be suitable for most people, however if you find that
50154+ many of your logs are being interpreted as flooding, you may want to
50155+ raise this value.
50156+
50157+endmenu
50158+
50159+endmenu
50160diff --git a/grsecurity/Makefile b/grsecurity/Makefile
50161new file mode 100644
50162index 0000000..1b9afa9
50163--- /dev/null
50164+++ b/grsecurity/Makefile
50165@@ -0,0 +1,38 @@
50166+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50167+# during 2001-2009 it has been completely redesigned by Brad Spengler
50168+# into an RBAC system
50169+#
50170+# All code in this directory and various hooks inserted throughout the kernel
50171+# are copyright Brad Spengler - Open Source Security, Inc., and released
50172+# under the GPL v2 or higher
50173+
50174+KBUILD_CFLAGS += -Werror
50175+
50176+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50177+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
50178+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
50179+
50180+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50181+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50182+ gracl_learn.o grsec_log.o
50183+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
50184+
50185+ifdef CONFIG_NET
50186+obj-y += grsec_sock.o
50187+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50188+endif
50189+
50190+ifndef CONFIG_GRKERNSEC
50191+obj-y += grsec_disabled.o
50192+endif
50193+
50194+ifdef CONFIG_GRKERNSEC_HIDESYM
50195+extra-y := grsec_hidesym.o
50196+$(obj)/grsec_hidesym.o:
50197+ @-chmod -f 500 /boot
50198+ @-chmod -f 500 /lib/modules
50199+ @-chmod -f 500 /lib64/modules
50200+ @-chmod -f 500 /lib32/modules
50201+ @-chmod -f 700 .
50202+ @echo ' grsec: protected kernel image paths'
50203+endif
50204diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
50205new file mode 100644
50206index 0000000..00b6c54
50207--- /dev/null
50208+++ b/grsecurity/gracl.c
50209@@ -0,0 +1,4012 @@
50210+#include <linux/kernel.h>
50211+#include <linux/module.h>
50212+#include <linux/sched.h>
50213+#include <linux/mm.h>
50214+#include <linux/file.h>
50215+#include <linux/fs.h>
50216+#include <linux/namei.h>
50217+#include <linux/mount.h>
50218+#include <linux/tty.h>
50219+#include <linux/proc_fs.h>
50220+#include <linux/lglock.h>
50221+#include <linux/slab.h>
50222+#include <linux/vmalloc.h>
50223+#include <linux/types.h>
50224+#include <linux/sysctl.h>
50225+#include <linux/netdevice.h>
50226+#include <linux/ptrace.h>
50227+#include <linux/gracl.h>
50228+#include <linux/gralloc.h>
50229+#include <linux/security.h>
50230+#include <linux/grinternal.h>
50231+#include <linux/pid_namespace.h>
50232+#include <linux/fdtable.h>
50233+#include <linux/percpu.h>
50234+#include "../fs/mount.h"
50235+
50236+#include <asm/uaccess.h>
50237+#include <asm/errno.h>
50238+#include <asm/mman.h>
50239+
50240+static struct acl_role_db acl_role_set;
50241+static struct name_db name_set;
50242+static struct inodev_db inodev_set;
50243+
50244+/* for keeping track of userspace pointers used for subjects, so we
50245+ can share references in the kernel as well
50246+*/
50247+
50248+static struct path real_root;
50249+
50250+static struct acl_subj_map_db subj_map_set;
50251+
50252+static struct acl_role_label *default_role;
50253+
50254+static struct acl_role_label *role_list;
50255+
50256+static u16 acl_sp_role_value;
50257+
50258+extern char *gr_shared_page[4];
50259+static DEFINE_MUTEX(gr_dev_mutex);
50260+DEFINE_RWLOCK(gr_inode_lock);
50261+
50262+struct gr_arg *gr_usermode;
50263+
50264+static unsigned int gr_status __read_only = GR_STATUS_INIT;
50265+
50266+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
50267+extern void gr_clear_learn_entries(void);
50268+
50269+#ifdef CONFIG_GRKERNSEC_RESLOG
50270+extern void gr_log_resource(const struct task_struct *task,
50271+ const int res, const unsigned long wanted, const int gt);
50272+#endif
50273+
50274+unsigned char *gr_system_salt;
50275+unsigned char *gr_system_sum;
50276+
50277+static struct sprole_pw **acl_special_roles = NULL;
50278+static __u16 num_sprole_pws = 0;
50279+
50280+static struct acl_role_label *kernel_role = NULL;
50281+
50282+static unsigned int gr_auth_attempts = 0;
50283+static unsigned long gr_auth_expires = 0UL;
50284+
50285+#ifdef CONFIG_NET
50286+extern struct vfsmount *sock_mnt;
50287+#endif
50288+
50289+extern struct vfsmount *pipe_mnt;
50290+extern struct vfsmount *shm_mnt;
50291+#ifdef CONFIG_HUGETLBFS
50292+extern struct vfsmount *hugetlbfs_vfsmount;
50293+#endif
50294+
50295+static struct acl_object_label *fakefs_obj_rw;
50296+static struct acl_object_label *fakefs_obj_rwx;
50297+
50298+extern int gr_init_uidset(void);
50299+extern void gr_free_uidset(void);
50300+extern void gr_remove_uid(uid_t uid);
50301+extern int gr_find_uid(uid_t uid);
50302+
50303+DECLARE_BRLOCK(vfsmount_lock);
50304+
50305+__inline__ int
50306+gr_acl_is_enabled(void)
50307+{
50308+ return (gr_status & GR_READY);
50309+}
50310+
50311+#ifdef CONFIG_BTRFS_FS
50312+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
50313+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
50314+#endif
50315+
50316+static inline dev_t __get_dev(const struct dentry *dentry)
50317+{
50318+#ifdef CONFIG_BTRFS_FS
50319+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
50320+ return get_btrfs_dev_from_inode(dentry->d_inode);
50321+ else
50322+#endif
50323+ return dentry->d_inode->i_sb->s_dev;
50324+}
50325+
50326+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50327+{
50328+ return __get_dev(dentry);
50329+}
50330+
50331+static char gr_task_roletype_to_char(struct task_struct *task)
50332+{
50333+ switch (task->role->roletype &
50334+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
50335+ GR_ROLE_SPECIAL)) {
50336+ case GR_ROLE_DEFAULT:
50337+ return 'D';
50338+ case GR_ROLE_USER:
50339+ return 'U';
50340+ case GR_ROLE_GROUP:
50341+ return 'G';
50342+ case GR_ROLE_SPECIAL:
50343+ return 'S';
50344+ }
50345+
50346+ return 'X';
50347+}
50348+
50349+char gr_roletype_to_char(void)
50350+{
50351+ return gr_task_roletype_to_char(current);
50352+}
50353+
50354+__inline__ int
50355+gr_acl_tpe_check(void)
50356+{
50357+ if (unlikely(!(gr_status & GR_READY)))
50358+ return 0;
50359+ if (current->role->roletype & GR_ROLE_TPE)
50360+ return 1;
50361+ else
50362+ return 0;
50363+}
50364+
50365+int
50366+gr_handle_rawio(const struct inode *inode)
50367+{
50368+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50369+ if (inode && S_ISBLK(inode->i_mode) &&
50370+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50371+ !capable(CAP_SYS_RAWIO))
50372+ return 1;
50373+#endif
50374+ return 0;
50375+}
50376+
50377+static int
50378+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
50379+{
50380+ if (likely(lena != lenb))
50381+ return 0;
50382+
50383+ return !memcmp(a, b, lena);
50384+}
50385+
50386+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
50387+{
50388+ *buflen -= namelen;
50389+ if (*buflen < 0)
50390+ return -ENAMETOOLONG;
50391+ *buffer -= namelen;
50392+ memcpy(*buffer, str, namelen);
50393+ return 0;
50394+}
50395+
50396+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
50397+{
50398+ return prepend(buffer, buflen, name->name, name->len);
50399+}
50400+
50401+static int prepend_path(const struct path *path, struct path *root,
50402+ char **buffer, int *buflen)
50403+{
50404+ struct dentry *dentry = path->dentry;
50405+ struct vfsmount *vfsmnt = path->mnt;
50406+ struct mount *mnt = real_mount(vfsmnt);
50407+ bool slash = false;
50408+ int error = 0;
50409+
50410+ while (dentry != root->dentry || vfsmnt != root->mnt) {
50411+ struct dentry * parent;
50412+
50413+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
50414+ /* Global root? */
50415+ if (!mnt_has_parent(mnt)) {
50416+ goto out;
50417+ }
50418+ dentry = mnt->mnt_mountpoint;
50419+ mnt = mnt->mnt_parent;
50420+ vfsmnt = &mnt->mnt;
50421+ continue;
50422+ }
50423+ parent = dentry->d_parent;
50424+ prefetch(parent);
50425+ spin_lock(&dentry->d_lock);
50426+ error = prepend_name(buffer, buflen, &dentry->d_name);
50427+ spin_unlock(&dentry->d_lock);
50428+ if (!error)
50429+ error = prepend(buffer, buflen, "/", 1);
50430+ if (error)
50431+ break;
50432+
50433+ slash = true;
50434+ dentry = parent;
50435+ }
50436+
50437+out:
50438+ if (!error && !slash)
50439+ error = prepend(buffer, buflen, "/", 1);
50440+
50441+ return error;
50442+}
50443+
50444+/* this must be called with vfsmount_lock and rename_lock held */
50445+
50446+static char *__our_d_path(const struct path *path, struct path *root,
50447+ char *buf, int buflen)
50448+{
50449+ char *res = buf + buflen;
50450+ int error;
50451+
50452+ prepend(&res, &buflen, "\0", 1);
50453+ error = prepend_path(path, root, &res, &buflen);
50454+ if (error)
50455+ return ERR_PTR(error);
50456+
50457+ return res;
50458+}
50459+
50460+static char *
50461+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
50462+{
50463+ char *retval;
50464+
50465+ retval = __our_d_path(path, root, buf, buflen);
50466+ if (unlikely(IS_ERR(retval)))
50467+ retval = strcpy(buf, "<path too long>");
50468+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
50469+ retval[1] = '\0';
50470+
50471+ return retval;
50472+}
50473+
50474+static char *
50475+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50476+ char *buf, int buflen)
50477+{
50478+ struct path path;
50479+ char *res;
50480+
50481+ path.dentry = (struct dentry *)dentry;
50482+ path.mnt = (struct vfsmount *)vfsmnt;
50483+
50484+ /* we can use real_root.dentry, real_root.mnt, because this is only called
50485+ by the RBAC system */
50486+ res = gen_full_path(&path, &real_root, buf, buflen);
50487+
50488+ return res;
50489+}
50490+
50491+static char *
50492+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50493+ char *buf, int buflen)
50494+{
50495+ char *res;
50496+ struct path path;
50497+ struct path root;
50498+ struct task_struct *reaper = init_pid_ns.child_reaper;
50499+
50500+ path.dentry = (struct dentry *)dentry;
50501+ path.mnt = (struct vfsmount *)vfsmnt;
50502+
50503+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
50504+ get_fs_root(reaper->fs, &root);
50505+
50506+ write_seqlock(&rename_lock);
50507+ br_read_lock(vfsmount_lock);
50508+ res = gen_full_path(&path, &root, buf, buflen);
50509+ br_read_unlock(vfsmount_lock);
50510+ write_sequnlock(&rename_lock);
50511+
50512+ path_put(&root);
50513+ return res;
50514+}
50515+
50516+static char *
50517+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50518+{
50519+ char *ret;
50520+ write_seqlock(&rename_lock);
50521+ br_read_lock(vfsmount_lock);
50522+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50523+ PAGE_SIZE);
50524+ br_read_unlock(vfsmount_lock);
50525+ write_sequnlock(&rename_lock);
50526+ return ret;
50527+}
50528+
50529+static char *
50530+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50531+{
50532+ char *ret;
50533+ char *buf;
50534+ int buflen;
50535+
50536+ write_seqlock(&rename_lock);
50537+ br_read_lock(vfsmount_lock);
50538+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50539+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
50540+ buflen = (int)(ret - buf);
50541+ if (buflen >= 5)
50542+ prepend(&ret, &buflen, "/proc", 5);
50543+ else
50544+ ret = strcpy(buf, "<path too long>");
50545+ br_read_unlock(vfsmount_lock);
50546+ write_sequnlock(&rename_lock);
50547+ return ret;
50548+}
50549+
50550+char *
50551+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
50552+{
50553+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50554+ PAGE_SIZE);
50555+}
50556+
50557+char *
50558+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
50559+{
50560+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50561+ PAGE_SIZE);
50562+}
50563+
50564+char *
50565+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
50566+{
50567+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
50568+ PAGE_SIZE);
50569+}
50570+
50571+char *
50572+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
50573+{
50574+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
50575+ PAGE_SIZE);
50576+}
50577+
50578+char *
50579+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
50580+{
50581+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
50582+ PAGE_SIZE);
50583+}
50584+
50585+__inline__ __u32
50586+to_gr_audit(const __u32 reqmode)
50587+{
50588+ /* masks off auditable permission flags, then shifts them to create
50589+ auditing flags, and adds the special case of append auditing if
50590+ we're requesting write */
50591+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
50592+}
50593+
50594+struct acl_subject_label *
50595+lookup_subject_map(const struct acl_subject_label *userp)
50596+{
50597+ unsigned int index = shash(userp, subj_map_set.s_size);
50598+ struct subject_map *match;
50599+
50600+ match = subj_map_set.s_hash[index];
50601+
50602+ while (match && match->user != userp)
50603+ match = match->next;
50604+
50605+ if (match != NULL)
50606+ return match->kernel;
50607+ else
50608+ return NULL;
50609+}
50610+
50611+static void
50612+insert_subj_map_entry(struct subject_map *subjmap)
50613+{
50614+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
50615+ struct subject_map **curr;
50616+
50617+ subjmap->prev = NULL;
50618+
50619+ curr = &subj_map_set.s_hash[index];
50620+ if (*curr != NULL)
50621+ (*curr)->prev = subjmap;
50622+
50623+ subjmap->next = *curr;
50624+ *curr = subjmap;
50625+
50626+ return;
50627+}
50628+
50629+static struct acl_role_label *
50630+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
50631+ const gid_t gid)
50632+{
50633+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
50634+ struct acl_role_label *match;
50635+ struct role_allowed_ip *ipp;
50636+ unsigned int x;
50637+ u32 curr_ip = task->signal->curr_ip;
50638+
50639+ task->signal->saved_ip = curr_ip;
50640+
50641+ match = acl_role_set.r_hash[index];
50642+
50643+ while (match) {
50644+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
50645+ for (x = 0; x < match->domain_child_num; x++) {
50646+ if (match->domain_children[x] == uid)
50647+ goto found;
50648+ }
50649+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
50650+ break;
50651+ match = match->next;
50652+ }
50653+found:
50654+ if (match == NULL) {
50655+ try_group:
50656+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
50657+ match = acl_role_set.r_hash[index];
50658+
50659+ while (match) {
50660+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
50661+ for (x = 0; x < match->domain_child_num; x++) {
50662+ if (match->domain_children[x] == gid)
50663+ goto found2;
50664+ }
50665+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
50666+ break;
50667+ match = match->next;
50668+ }
50669+found2:
50670+ if (match == NULL)
50671+ match = default_role;
50672+ if (match->allowed_ips == NULL)
50673+ return match;
50674+ else {
50675+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50676+ if (likely
50677+ ((ntohl(curr_ip) & ipp->netmask) ==
50678+ (ntohl(ipp->addr) & ipp->netmask)))
50679+ return match;
50680+ }
50681+ match = default_role;
50682+ }
50683+ } else if (match->allowed_ips == NULL) {
50684+ return match;
50685+ } else {
50686+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50687+ if (likely
50688+ ((ntohl(curr_ip) & ipp->netmask) ==
50689+ (ntohl(ipp->addr) & ipp->netmask)))
50690+ return match;
50691+ }
50692+ goto try_group;
50693+ }
50694+
50695+ return match;
50696+}
50697+
50698+struct acl_subject_label *
50699+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
50700+ const struct acl_role_label *role)
50701+{
50702+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
50703+ struct acl_subject_label *match;
50704+
50705+ match = role->subj_hash[index];
50706+
50707+ while (match && (match->inode != ino || match->device != dev ||
50708+ (match->mode & GR_DELETED))) {
50709+ match = match->next;
50710+ }
50711+
50712+ if (match && !(match->mode & GR_DELETED))
50713+ return match;
50714+ else
50715+ return NULL;
50716+}
50717+
50718+struct acl_subject_label *
50719+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
50720+ const struct acl_role_label *role)
50721+{
50722+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
50723+ struct acl_subject_label *match;
50724+
50725+ match = role->subj_hash[index];
50726+
50727+ while (match && (match->inode != ino || match->device != dev ||
50728+ !(match->mode & GR_DELETED))) {
50729+ match = match->next;
50730+ }
50731+
50732+ if (match && (match->mode & GR_DELETED))
50733+ return match;
50734+ else
50735+ return NULL;
50736+}
50737+
50738+static struct acl_object_label *
50739+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
50740+ const struct acl_subject_label *subj)
50741+{
50742+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
50743+ struct acl_object_label *match;
50744+
50745+ match = subj->obj_hash[index];
50746+
50747+ while (match && (match->inode != ino || match->device != dev ||
50748+ (match->mode & GR_DELETED))) {
50749+ match = match->next;
50750+ }
50751+
50752+ if (match && !(match->mode & GR_DELETED))
50753+ return match;
50754+ else
50755+ return NULL;
50756+}
50757+
50758+static struct acl_object_label *
50759+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
50760+ const struct acl_subject_label *subj)
50761+{
50762+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
50763+ struct acl_object_label *match;
50764+
50765+ match = subj->obj_hash[index];
50766+
50767+ while (match && (match->inode != ino || match->device != dev ||
50768+ !(match->mode & GR_DELETED))) {
50769+ match = match->next;
50770+ }
50771+
50772+ if (match && (match->mode & GR_DELETED))
50773+ return match;
50774+
50775+ match = subj->obj_hash[index];
50776+
50777+ while (match && (match->inode != ino || match->device != dev ||
50778+ (match->mode & GR_DELETED))) {
50779+ match = match->next;
50780+ }
50781+
50782+ if (match && !(match->mode & GR_DELETED))
50783+ return match;
50784+ else
50785+ return NULL;
50786+}
50787+
50788+static struct name_entry *
50789+lookup_name_entry(const char *name)
50790+{
50791+ unsigned int len = strlen(name);
50792+ unsigned int key = full_name_hash(name, len);
50793+ unsigned int index = key % name_set.n_size;
50794+ struct name_entry *match;
50795+
50796+ match = name_set.n_hash[index];
50797+
50798+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
50799+ match = match->next;
50800+
50801+ return match;
50802+}
50803+
50804+static struct name_entry *
50805+lookup_name_entry_create(const char *name)
50806+{
50807+ unsigned int len = strlen(name);
50808+ unsigned int key = full_name_hash(name, len);
50809+ unsigned int index = key % name_set.n_size;
50810+ struct name_entry *match;
50811+
50812+ match = name_set.n_hash[index];
50813+
50814+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
50815+ !match->deleted))
50816+ match = match->next;
50817+
50818+ if (match && match->deleted)
50819+ return match;
50820+
50821+ match = name_set.n_hash[index];
50822+
50823+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
50824+ match->deleted))
50825+ match = match->next;
50826+
50827+ if (match && !match->deleted)
50828+ return match;
50829+ else
50830+ return NULL;
50831+}
50832+
50833+static struct inodev_entry *
50834+lookup_inodev_entry(const ino_t ino, const dev_t dev)
50835+{
50836+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
50837+ struct inodev_entry *match;
50838+
50839+ match = inodev_set.i_hash[index];
50840+
50841+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
50842+ match = match->next;
50843+
50844+ return match;
50845+}
50846+
50847+static void
50848+insert_inodev_entry(struct inodev_entry *entry)
50849+{
50850+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
50851+ inodev_set.i_size);
50852+ struct inodev_entry **curr;
50853+
50854+ entry->prev = NULL;
50855+
50856+ curr = &inodev_set.i_hash[index];
50857+ if (*curr != NULL)
50858+ (*curr)->prev = entry;
50859+
50860+ entry->next = *curr;
50861+ *curr = entry;
50862+
50863+ return;
50864+}
50865+
50866+static void
50867+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
50868+{
50869+ unsigned int index =
50870+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
50871+ struct acl_role_label **curr;
50872+ struct acl_role_label *tmp, *tmp2;
50873+
50874+ curr = &acl_role_set.r_hash[index];
50875+
50876+ /* simple case, slot is empty, just set it to our role */
50877+ if (*curr == NULL) {
50878+ *curr = role;
50879+ } else {
50880+ /* example:
50881+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
50882+ 2 -> 3
50883+ */
50884+ /* first check to see if we can already be reached via this slot */
50885+ tmp = *curr;
50886+ while (tmp && tmp != role)
50887+ tmp = tmp->next;
50888+ if (tmp == role) {
50889+ /* we don't need to add ourselves to this slot's chain */
50890+ return;
50891+ }
50892+ /* we need to add ourselves to this chain, two cases */
50893+ if (role->next == NULL) {
50894+ /* simple case, append the current chain to our role */
50895+ role->next = *curr;
50896+ *curr = role;
50897+ } else {
50898+ /* 1 -> 2 -> 3 -> 4
50899+ 2 -> 3 -> 4
50900+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
50901+ */
50902+ /* trickier case: walk our role's chain until we find
50903+ the role for the start of the current slot's chain */
50904+ tmp = role;
50905+ tmp2 = *curr;
50906+ while (tmp->next && tmp->next != tmp2)
50907+ tmp = tmp->next;
50908+ if (tmp->next == tmp2) {
50909+ /* from example above, we found 3, so just
50910+ replace this slot's chain with ours */
50911+ *curr = role;
50912+ } else {
50913+ /* we didn't find a subset of our role's chain
50914+ in the current slot's chain, so append their
50915+ chain to ours, and set us as the first role in
50916+ the slot's chain
50917+
50918+ we could fold this case with the case above,
50919+ but making it explicit for clarity
50920+ */
50921+ tmp->next = tmp2;
50922+ *curr = role;
50923+ }
50924+ }
50925+ }
50926+
50927+ return;
50928+}
50929+
50930+static void
50931+insert_acl_role_label(struct acl_role_label *role)
50932+{
50933+ int i;
50934+
50935+ if (role_list == NULL) {
50936+ role_list = role;
50937+ role->prev = NULL;
50938+ } else {
50939+ role->prev = role_list;
50940+ role_list = role;
50941+ }
50942+
50943+ /* used for hash chains */
50944+ role->next = NULL;
50945+
50946+ if (role->roletype & GR_ROLE_DOMAIN) {
50947+ for (i = 0; i < role->domain_child_num; i++)
50948+ __insert_acl_role_label(role, role->domain_children[i]);
50949+ } else
50950+ __insert_acl_role_label(role, role->uidgid);
50951+}
50952+
50953+static int
50954+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
50955+{
50956+ struct name_entry **curr, *nentry;
50957+ struct inodev_entry *ientry;
50958+ unsigned int len = strlen(name);
50959+ unsigned int key = full_name_hash(name, len);
50960+ unsigned int index = key % name_set.n_size;
50961+
50962+ curr = &name_set.n_hash[index];
50963+
50964+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
50965+ curr = &((*curr)->next);
50966+
50967+ if (*curr != NULL)
50968+ return 1;
50969+
50970+ nentry = acl_alloc(sizeof (struct name_entry));
50971+ if (nentry == NULL)
50972+ return 0;
50973+ ientry = acl_alloc(sizeof (struct inodev_entry));
50974+ if (ientry == NULL)
50975+ return 0;
50976+ ientry->nentry = nentry;
50977+
50978+ nentry->key = key;
50979+ nentry->name = name;
50980+ nentry->inode = inode;
50981+ nentry->device = device;
50982+ nentry->len = len;
50983+ nentry->deleted = deleted;
50984+
50985+ nentry->prev = NULL;
50986+ curr = &name_set.n_hash[index];
50987+ if (*curr != NULL)
50988+ (*curr)->prev = nentry;
50989+ nentry->next = *curr;
50990+ *curr = nentry;
50991+
50992+ /* insert us into the table searchable by inode/dev */
50993+ insert_inodev_entry(ientry);
50994+
50995+ return 1;
50996+}
50997+
50998+static void
50999+insert_acl_obj_label(struct acl_object_label *obj,
51000+ struct acl_subject_label *subj)
51001+{
51002+ unsigned int index =
51003+ fhash(obj->inode, obj->device, subj->obj_hash_size);
51004+ struct acl_object_label **curr;
51005+
51006+
51007+ obj->prev = NULL;
51008+
51009+ curr = &subj->obj_hash[index];
51010+ if (*curr != NULL)
51011+ (*curr)->prev = obj;
51012+
51013+ obj->next = *curr;
51014+ *curr = obj;
51015+
51016+ return;
51017+}
51018+
51019+static void
51020+insert_acl_subj_label(struct acl_subject_label *obj,
51021+ struct acl_role_label *role)
51022+{
51023+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
51024+ struct acl_subject_label **curr;
51025+
51026+ obj->prev = NULL;
51027+
51028+ curr = &role->subj_hash[index];
51029+ if (*curr != NULL)
51030+ (*curr)->prev = obj;
51031+
51032+ obj->next = *curr;
51033+ *curr = obj;
51034+
51035+ return;
51036+}
51037+
51038+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
51039+
51040+static void *
51041+create_table(__u32 * len, int elementsize)
51042+{
51043+ unsigned int table_sizes[] = {
51044+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
51045+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
51046+ 4194301, 8388593, 16777213, 33554393, 67108859
51047+ };
51048+ void *newtable = NULL;
51049+ unsigned int pwr = 0;
51050+
51051+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
51052+ table_sizes[pwr] <= *len)
51053+ pwr++;
51054+
51055+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
51056+ return newtable;
51057+
51058+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
51059+ newtable =
51060+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
51061+ else
51062+ newtable = vmalloc(table_sizes[pwr] * elementsize);
51063+
51064+ *len = table_sizes[pwr];
51065+
51066+ return newtable;
51067+}
51068+
51069+static int
51070+init_variables(const struct gr_arg *arg)
51071+{
51072+ struct task_struct *reaper = init_pid_ns.child_reaper;
51073+ unsigned int stacksize;
51074+
51075+ subj_map_set.s_size = arg->role_db.num_subjects;
51076+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
51077+ name_set.n_size = arg->role_db.num_objects;
51078+ inodev_set.i_size = arg->role_db.num_objects;
51079+
51080+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
51081+ !name_set.n_size || !inodev_set.i_size)
51082+ return 1;
51083+
51084+ if (!gr_init_uidset())
51085+ return 1;
51086+
51087+ /* set up the stack that holds allocation info */
51088+
51089+ stacksize = arg->role_db.num_pointers + 5;
51090+
51091+ if (!acl_alloc_stack_init(stacksize))
51092+ return 1;
51093+
51094+ /* grab reference for the real root dentry and vfsmount */
51095+ get_fs_root(reaper->fs, &real_root);
51096+
51097+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51098+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
51099+#endif
51100+
51101+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
51102+ if (fakefs_obj_rw == NULL)
51103+ return 1;
51104+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
51105+
51106+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
51107+ if (fakefs_obj_rwx == NULL)
51108+ return 1;
51109+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
51110+
51111+ subj_map_set.s_hash =
51112+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
51113+ acl_role_set.r_hash =
51114+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
51115+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
51116+ inodev_set.i_hash =
51117+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
51118+
51119+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
51120+ !name_set.n_hash || !inodev_set.i_hash)
51121+ return 1;
51122+
51123+ memset(subj_map_set.s_hash, 0,
51124+ sizeof(struct subject_map *) * subj_map_set.s_size);
51125+ memset(acl_role_set.r_hash, 0,
51126+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
51127+ memset(name_set.n_hash, 0,
51128+ sizeof (struct name_entry *) * name_set.n_size);
51129+ memset(inodev_set.i_hash, 0,
51130+ sizeof (struct inodev_entry *) * inodev_set.i_size);
51131+
51132+ return 0;
51133+}
51134+
51135+/* free information not needed after startup
51136+ currently contains user->kernel pointer mappings for subjects
51137+*/
51138+
51139+static void
51140+free_init_variables(void)
51141+{
51142+ __u32 i;
51143+
51144+ if (subj_map_set.s_hash) {
51145+ for (i = 0; i < subj_map_set.s_size; i++) {
51146+ if (subj_map_set.s_hash[i]) {
51147+ kfree(subj_map_set.s_hash[i]);
51148+ subj_map_set.s_hash[i] = NULL;
51149+ }
51150+ }
51151+
51152+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
51153+ PAGE_SIZE)
51154+ kfree(subj_map_set.s_hash);
51155+ else
51156+ vfree(subj_map_set.s_hash);
51157+ }
51158+
51159+ return;
51160+}
51161+
51162+static void
51163+free_variables(void)
51164+{
51165+ struct acl_subject_label *s;
51166+ struct acl_role_label *r;
51167+ struct task_struct *task, *task2;
51168+ unsigned int x;
51169+
51170+ gr_clear_learn_entries();
51171+
51172+ read_lock(&tasklist_lock);
51173+ do_each_thread(task2, task) {
51174+ task->acl_sp_role = 0;
51175+ task->acl_role_id = 0;
51176+ task->acl = NULL;
51177+ task->role = NULL;
51178+ } while_each_thread(task2, task);
51179+ read_unlock(&tasklist_lock);
51180+
51181+ /* release the reference to the real root dentry and vfsmount */
51182+ path_put(&real_root);
51183+ memset(&real_root, 0, sizeof(real_root));
51184+
51185+ /* free all object hash tables */
51186+
51187+ FOR_EACH_ROLE_START(r)
51188+ if (r->subj_hash == NULL)
51189+ goto next_role;
51190+ FOR_EACH_SUBJECT_START(r, s, x)
51191+ if (s->obj_hash == NULL)
51192+ break;
51193+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51194+ kfree(s->obj_hash);
51195+ else
51196+ vfree(s->obj_hash);
51197+ FOR_EACH_SUBJECT_END(s, x)
51198+ FOR_EACH_NESTED_SUBJECT_START(r, s)
51199+ if (s->obj_hash == NULL)
51200+ break;
51201+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51202+ kfree(s->obj_hash);
51203+ else
51204+ vfree(s->obj_hash);
51205+ FOR_EACH_NESTED_SUBJECT_END(s)
51206+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
51207+ kfree(r->subj_hash);
51208+ else
51209+ vfree(r->subj_hash);
51210+ r->subj_hash = NULL;
51211+next_role:
51212+ FOR_EACH_ROLE_END(r)
51213+
51214+ acl_free_all();
51215+
51216+ if (acl_role_set.r_hash) {
51217+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
51218+ PAGE_SIZE)
51219+ kfree(acl_role_set.r_hash);
51220+ else
51221+ vfree(acl_role_set.r_hash);
51222+ }
51223+ if (name_set.n_hash) {
51224+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
51225+ PAGE_SIZE)
51226+ kfree(name_set.n_hash);
51227+ else
51228+ vfree(name_set.n_hash);
51229+ }
51230+
51231+ if (inodev_set.i_hash) {
51232+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
51233+ PAGE_SIZE)
51234+ kfree(inodev_set.i_hash);
51235+ else
51236+ vfree(inodev_set.i_hash);
51237+ }
51238+
51239+ gr_free_uidset();
51240+
51241+ memset(&name_set, 0, sizeof (struct name_db));
51242+ memset(&inodev_set, 0, sizeof (struct inodev_db));
51243+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
51244+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
51245+
51246+ default_role = NULL;
51247+ kernel_role = NULL;
51248+ role_list = NULL;
51249+
51250+ return;
51251+}
51252+
51253+static __u32
51254+count_user_objs(struct acl_object_label *userp)
51255+{
51256+ struct acl_object_label o_tmp;
51257+ __u32 num = 0;
51258+
51259+ while (userp) {
51260+ if (copy_from_user(&o_tmp, userp,
51261+ sizeof (struct acl_object_label)))
51262+ break;
51263+
51264+ userp = o_tmp.prev;
51265+ num++;
51266+ }
51267+
51268+ return num;
51269+}
51270+
51271+static struct acl_subject_label *
51272+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
51273+
51274+static int
51275+copy_user_glob(struct acl_object_label *obj)
51276+{
51277+ struct acl_object_label *g_tmp, **guser;
51278+ unsigned int len;
51279+ char *tmp;
51280+
51281+ if (obj->globbed == NULL)
51282+ return 0;
51283+
51284+ guser = &obj->globbed;
51285+ while (*guser) {
51286+ g_tmp = (struct acl_object_label *)
51287+ acl_alloc(sizeof (struct acl_object_label));
51288+ if (g_tmp == NULL)
51289+ return -ENOMEM;
51290+
51291+ if (copy_from_user(g_tmp, *guser,
51292+ sizeof (struct acl_object_label)))
51293+ return -EFAULT;
51294+
51295+ len = strnlen_user(g_tmp->filename, PATH_MAX);
51296+
51297+ if (!len || len >= PATH_MAX)
51298+ return -EINVAL;
51299+
51300+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51301+ return -ENOMEM;
51302+
51303+ if (copy_from_user(tmp, g_tmp->filename, len))
51304+ return -EFAULT;
51305+ tmp[len-1] = '\0';
51306+ g_tmp->filename = tmp;
51307+
51308+ *guser = g_tmp;
51309+ guser = &(g_tmp->next);
51310+ }
51311+
51312+ return 0;
51313+}
51314+
51315+static int
51316+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
51317+ struct acl_role_label *role)
51318+{
51319+ struct acl_object_label *o_tmp;
51320+ unsigned int len;
51321+ int ret;
51322+ char *tmp;
51323+
51324+ while (userp) {
51325+ if ((o_tmp = (struct acl_object_label *)
51326+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
51327+ return -ENOMEM;
51328+
51329+ if (copy_from_user(o_tmp, userp,
51330+ sizeof (struct acl_object_label)))
51331+ return -EFAULT;
51332+
51333+ userp = o_tmp->prev;
51334+
51335+ len = strnlen_user(o_tmp->filename, PATH_MAX);
51336+
51337+ if (!len || len >= PATH_MAX)
51338+ return -EINVAL;
51339+
51340+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51341+ return -ENOMEM;
51342+
51343+ if (copy_from_user(tmp, o_tmp->filename, len))
51344+ return -EFAULT;
51345+ tmp[len-1] = '\0';
51346+ o_tmp->filename = tmp;
51347+
51348+ insert_acl_obj_label(o_tmp, subj);
51349+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
51350+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
51351+ return -ENOMEM;
51352+
51353+ ret = copy_user_glob(o_tmp);
51354+ if (ret)
51355+ return ret;
51356+
51357+ if (o_tmp->nested) {
51358+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
51359+ if (IS_ERR(o_tmp->nested))
51360+ return PTR_ERR(o_tmp->nested);
51361+
51362+ /* insert into nested subject list */
51363+ o_tmp->nested->next = role->hash->first;
51364+ role->hash->first = o_tmp->nested;
51365+ }
51366+ }
51367+
51368+ return 0;
51369+}
51370+
51371+static __u32
51372+count_user_subjs(struct acl_subject_label *userp)
51373+{
51374+ struct acl_subject_label s_tmp;
51375+ __u32 num = 0;
51376+
51377+ while (userp) {
51378+ if (copy_from_user(&s_tmp, userp,
51379+ sizeof (struct acl_subject_label)))
51380+ break;
51381+
51382+ userp = s_tmp.prev;
51383+ /* do not count nested subjects against this count, since
51384+ they are not included in the hash table, but are
51385+ attached to objects. We have already counted
51386+ the subjects in userspace for the allocation
51387+ stack
51388+ */
51389+ if (!(s_tmp.mode & GR_NESTED))
51390+ num++;
51391+ }
51392+
51393+ return num;
51394+}
51395+
51396+static int
51397+copy_user_allowedips(struct acl_role_label *rolep)
51398+{
51399+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
51400+
51401+ ruserip = rolep->allowed_ips;
51402+
51403+ while (ruserip) {
51404+ rlast = rtmp;
51405+
51406+ if ((rtmp = (struct role_allowed_ip *)
51407+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
51408+ return -ENOMEM;
51409+
51410+ if (copy_from_user(rtmp, ruserip,
51411+ sizeof (struct role_allowed_ip)))
51412+ return -EFAULT;
51413+
51414+ ruserip = rtmp->prev;
51415+
51416+ if (!rlast) {
51417+ rtmp->prev = NULL;
51418+ rolep->allowed_ips = rtmp;
51419+ } else {
51420+ rlast->next = rtmp;
51421+ rtmp->prev = rlast;
51422+ }
51423+
51424+ if (!ruserip)
51425+ rtmp->next = NULL;
51426+ }
51427+
51428+ return 0;
51429+}
51430+
51431+static int
51432+copy_user_transitions(struct acl_role_label *rolep)
51433+{
51434+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
51435+
51436+ unsigned int len;
51437+ char *tmp;
51438+
51439+ rusertp = rolep->transitions;
51440+
51441+ while (rusertp) {
51442+ rlast = rtmp;
51443+
51444+ if ((rtmp = (struct role_transition *)
51445+ acl_alloc(sizeof (struct role_transition))) == NULL)
51446+ return -ENOMEM;
51447+
51448+ if (copy_from_user(rtmp, rusertp,
51449+ sizeof (struct role_transition)))
51450+ return -EFAULT;
51451+
51452+ rusertp = rtmp->prev;
51453+
51454+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
51455+
51456+ if (!len || len >= GR_SPROLE_LEN)
51457+ return -EINVAL;
51458+
51459+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51460+ return -ENOMEM;
51461+
51462+ if (copy_from_user(tmp, rtmp->rolename, len))
51463+ return -EFAULT;
51464+ tmp[len-1] = '\0';
51465+ rtmp->rolename = tmp;
51466+
51467+ if (!rlast) {
51468+ rtmp->prev = NULL;
51469+ rolep->transitions = rtmp;
51470+ } else {
51471+ rlast->next = rtmp;
51472+ rtmp->prev = rlast;
51473+ }
51474+
51475+ if (!rusertp)
51476+ rtmp->next = NULL;
51477+ }
51478+
51479+ return 0;
51480+}
51481+
51482+static struct acl_subject_label *
51483+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
51484+{
51485+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
51486+ unsigned int len;
51487+ char *tmp;
51488+ __u32 num_objs;
51489+ struct acl_ip_label **i_tmp, *i_utmp2;
51490+ struct gr_hash_struct ghash;
51491+ struct subject_map *subjmap;
51492+ unsigned int i_num;
51493+ int err;
51494+
51495+ s_tmp = lookup_subject_map(userp);
51496+
51497+ /* we've already copied this subject into the kernel, just return
51498+ the reference to it, and don't copy it over again
51499+ */
51500+ if (s_tmp)
51501+ return(s_tmp);
51502+
51503+ if ((s_tmp = (struct acl_subject_label *)
51504+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
51505+ return ERR_PTR(-ENOMEM);
51506+
51507+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
51508+ if (subjmap == NULL)
51509+ return ERR_PTR(-ENOMEM);
51510+
51511+ subjmap->user = userp;
51512+ subjmap->kernel = s_tmp;
51513+ insert_subj_map_entry(subjmap);
51514+
51515+ if (copy_from_user(s_tmp, userp,
51516+ sizeof (struct acl_subject_label)))
51517+ return ERR_PTR(-EFAULT);
51518+
51519+ len = strnlen_user(s_tmp->filename, PATH_MAX);
51520+
51521+ if (!len || len >= PATH_MAX)
51522+ return ERR_PTR(-EINVAL);
51523+
51524+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51525+ return ERR_PTR(-ENOMEM);
51526+
51527+ if (copy_from_user(tmp, s_tmp->filename, len))
51528+ return ERR_PTR(-EFAULT);
51529+ tmp[len-1] = '\0';
51530+ s_tmp->filename = tmp;
51531+
51532+ if (!strcmp(s_tmp->filename, "/"))
51533+ role->root_label = s_tmp;
51534+
51535+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
51536+ return ERR_PTR(-EFAULT);
51537+
51538+ /* copy user and group transition tables */
51539+
51540+ if (s_tmp->user_trans_num) {
51541+ uid_t *uidlist;
51542+
51543+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
51544+ if (uidlist == NULL)
51545+ return ERR_PTR(-ENOMEM);
51546+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
51547+ return ERR_PTR(-EFAULT);
51548+
51549+ s_tmp->user_transitions = uidlist;
51550+ }
51551+
51552+ if (s_tmp->group_trans_num) {
51553+ gid_t *gidlist;
51554+
51555+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
51556+ if (gidlist == NULL)
51557+ return ERR_PTR(-ENOMEM);
51558+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
51559+ return ERR_PTR(-EFAULT);
51560+
51561+ s_tmp->group_transitions = gidlist;
51562+ }
51563+
51564+ /* set up object hash table */
51565+ num_objs = count_user_objs(ghash.first);
51566+
51567+ s_tmp->obj_hash_size = num_objs;
51568+ s_tmp->obj_hash =
51569+ (struct acl_object_label **)
51570+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
51571+
51572+ if (!s_tmp->obj_hash)
51573+ return ERR_PTR(-ENOMEM);
51574+
51575+ memset(s_tmp->obj_hash, 0,
51576+ s_tmp->obj_hash_size *
51577+ sizeof (struct acl_object_label *));
51578+
51579+ /* add in objects */
51580+ err = copy_user_objs(ghash.first, s_tmp, role);
51581+
51582+ if (err)
51583+ return ERR_PTR(err);
51584+
51585+ /* set pointer for parent subject */
51586+ if (s_tmp->parent_subject) {
51587+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
51588+
51589+ if (IS_ERR(s_tmp2))
51590+ return s_tmp2;
51591+
51592+ s_tmp->parent_subject = s_tmp2;
51593+ }
51594+
51595+ /* add in ip acls */
51596+
51597+ if (!s_tmp->ip_num) {
51598+ s_tmp->ips = NULL;
51599+ goto insert;
51600+ }
51601+
51602+ i_tmp =
51603+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
51604+ sizeof (struct acl_ip_label *));
51605+
51606+ if (!i_tmp)
51607+ return ERR_PTR(-ENOMEM);
51608+
51609+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
51610+ *(i_tmp + i_num) =
51611+ (struct acl_ip_label *)
51612+ acl_alloc(sizeof (struct acl_ip_label));
51613+ if (!*(i_tmp + i_num))
51614+ return ERR_PTR(-ENOMEM);
51615+
51616+ if (copy_from_user
51617+ (&i_utmp2, s_tmp->ips + i_num,
51618+ sizeof (struct acl_ip_label *)))
51619+ return ERR_PTR(-EFAULT);
51620+
51621+ if (copy_from_user
51622+ (*(i_tmp + i_num), i_utmp2,
51623+ sizeof (struct acl_ip_label)))
51624+ return ERR_PTR(-EFAULT);
51625+
51626+ if ((*(i_tmp + i_num))->iface == NULL)
51627+ continue;
51628+
51629+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
51630+ if (!len || len >= IFNAMSIZ)
51631+ return ERR_PTR(-EINVAL);
51632+ tmp = acl_alloc(len);
51633+ if (tmp == NULL)
51634+ return ERR_PTR(-ENOMEM);
51635+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
51636+ return ERR_PTR(-EFAULT);
51637+ (*(i_tmp + i_num))->iface = tmp;
51638+ }
51639+
51640+ s_tmp->ips = i_tmp;
51641+
51642+insert:
51643+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
51644+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
51645+ return ERR_PTR(-ENOMEM);
51646+
51647+ return s_tmp;
51648+}
51649+
51650+static int
51651+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
51652+{
51653+ struct acl_subject_label s_pre;
51654+ struct acl_subject_label * ret;
51655+ int err;
51656+
51657+ while (userp) {
51658+ if (copy_from_user(&s_pre, userp,
51659+ sizeof (struct acl_subject_label)))
51660+ return -EFAULT;
51661+
51662+ /* do not add nested subjects here, add
51663+ while parsing objects
51664+ */
51665+
51666+ if (s_pre.mode & GR_NESTED) {
51667+ userp = s_pre.prev;
51668+ continue;
51669+ }
51670+
51671+ ret = do_copy_user_subj(userp, role);
51672+
51673+ err = PTR_ERR(ret);
51674+ if (IS_ERR(ret))
51675+ return err;
51676+
51677+ insert_acl_subj_label(ret, role);
51678+
51679+ userp = s_pre.prev;
51680+ }
51681+
51682+ return 0;
51683+}
51684+
51685+static int
51686+copy_user_acl(struct gr_arg *arg)
51687+{
51688+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
51689+ struct sprole_pw *sptmp;
51690+ struct gr_hash_struct *ghash;
51691+ uid_t *domainlist;
51692+ unsigned int r_num;
51693+ unsigned int len;
51694+ char *tmp;
51695+ int err = 0;
51696+ __u16 i;
51697+ __u32 num_subjs;
51698+
51699+ /* we need a default and kernel role */
51700+ if (arg->role_db.num_roles < 2)
51701+ return -EINVAL;
51702+
51703+ /* copy special role authentication info from userspace */
51704+
51705+ num_sprole_pws = arg->num_sprole_pws;
51706+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
51707+
51708+ if (!acl_special_roles && num_sprole_pws)
51709+ return -ENOMEM;
51710+
51711+ for (i = 0; i < num_sprole_pws; i++) {
51712+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
51713+ if (!sptmp)
51714+ return -ENOMEM;
51715+ if (copy_from_user(sptmp, arg->sprole_pws + i,
51716+ sizeof (struct sprole_pw)))
51717+ return -EFAULT;
51718+
51719+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
51720+
51721+ if (!len || len >= GR_SPROLE_LEN)
51722+ return -EINVAL;
51723+
51724+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51725+ return -ENOMEM;
51726+
51727+ if (copy_from_user(tmp, sptmp->rolename, len))
51728+ return -EFAULT;
51729+
51730+ tmp[len-1] = '\0';
51731+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51732+ printk(KERN_ALERT "Copying special role %s\n", tmp);
51733+#endif
51734+ sptmp->rolename = tmp;
51735+ acl_special_roles[i] = sptmp;
51736+ }
51737+
51738+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
51739+
51740+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
51741+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
51742+
51743+ if (!r_tmp)
51744+ return -ENOMEM;
51745+
51746+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
51747+ sizeof (struct acl_role_label *)))
51748+ return -EFAULT;
51749+
51750+ if (copy_from_user(r_tmp, r_utmp2,
51751+ sizeof (struct acl_role_label)))
51752+ return -EFAULT;
51753+
51754+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
51755+
51756+ if (!len || len >= PATH_MAX)
51757+ return -EINVAL;
51758+
51759+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51760+ return -ENOMEM;
51761+
51762+ if (copy_from_user(tmp, r_tmp->rolename, len))
51763+ return -EFAULT;
51764+
51765+ tmp[len-1] = '\0';
51766+ r_tmp->rolename = tmp;
51767+
51768+ if (!strcmp(r_tmp->rolename, "default")
51769+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
51770+ default_role = r_tmp;
51771+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
51772+ kernel_role = r_tmp;
51773+ }
51774+
51775+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
51776+ return -ENOMEM;
51777+
51778+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
51779+ return -EFAULT;
51780+
51781+ r_tmp->hash = ghash;
51782+
51783+ num_subjs = count_user_subjs(r_tmp->hash->first);
51784+
51785+ r_tmp->subj_hash_size = num_subjs;
51786+ r_tmp->subj_hash =
51787+ (struct acl_subject_label **)
51788+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
51789+
51790+ if (!r_tmp->subj_hash)
51791+ return -ENOMEM;
51792+
51793+ err = copy_user_allowedips(r_tmp);
51794+ if (err)
51795+ return err;
51796+
51797+ /* copy domain info */
51798+ if (r_tmp->domain_children != NULL) {
51799+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
51800+ if (domainlist == NULL)
51801+ return -ENOMEM;
51802+
51803+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
51804+ return -EFAULT;
51805+
51806+ r_tmp->domain_children = domainlist;
51807+ }
51808+
51809+ err = copy_user_transitions(r_tmp);
51810+ if (err)
51811+ return err;
51812+
51813+ memset(r_tmp->subj_hash, 0,
51814+ r_tmp->subj_hash_size *
51815+ sizeof (struct acl_subject_label *));
51816+
51817+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
51818+
51819+ if (err)
51820+ return err;
51821+
51822+ /* set nested subject list to null */
51823+ r_tmp->hash->first = NULL;
51824+
51825+ insert_acl_role_label(r_tmp);
51826+ }
51827+
51828+ if (default_role == NULL || kernel_role == NULL)
51829+ return -EINVAL;
51830+
51831+ return err;
51832+}
51833+
51834+static int
51835+gracl_init(struct gr_arg *args)
51836+{
51837+ int error = 0;
51838+
51839+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
51840+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
51841+
51842+ if (init_variables(args)) {
51843+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
51844+ error = -ENOMEM;
51845+ free_variables();
51846+ goto out;
51847+ }
51848+
51849+ error = copy_user_acl(args);
51850+ free_init_variables();
51851+ if (error) {
51852+ free_variables();
51853+ goto out;
51854+ }
51855+
51856+ if ((error = gr_set_acls(0))) {
51857+ free_variables();
51858+ goto out;
51859+ }
51860+
51861+ pax_open_kernel();
51862+ gr_status |= GR_READY;
51863+ pax_close_kernel();
51864+
51865+ out:
51866+ return error;
51867+}
51868+
51869+/* derived from glibc fnmatch() 0: match, 1: no match*/
51870+
51871+static int
51872+glob_match(const char *p, const char *n)
51873+{
51874+ char c;
51875+
51876+ while ((c = *p++) != '\0') {
51877+ switch (c) {
51878+ case '?':
51879+ if (*n == '\0')
51880+ return 1;
51881+ else if (*n == '/')
51882+ return 1;
51883+ break;
51884+ case '\\':
51885+ if (*n != c)
51886+ return 1;
51887+ break;
51888+ case '*':
51889+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
51890+ if (*n == '/')
51891+ return 1;
51892+ else if (c == '?') {
51893+ if (*n == '\0')
51894+ return 1;
51895+ else
51896+ ++n;
51897+ }
51898+ }
51899+ if (c == '\0') {
51900+ return 0;
51901+ } else {
51902+ const char *endp;
51903+
51904+ if ((endp = strchr(n, '/')) == NULL)
51905+ endp = n + strlen(n);
51906+
51907+ if (c == '[') {
51908+ for (--p; n < endp; ++n)
51909+ if (!glob_match(p, n))
51910+ return 0;
51911+ } else if (c == '/') {
51912+ while (*n != '\0' && *n != '/')
51913+ ++n;
51914+ if (*n == '/' && !glob_match(p, n + 1))
51915+ return 0;
51916+ } else {
51917+ for (--p; n < endp; ++n)
51918+ if (*n == c && !glob_match(p, n))
51919+ return 0;
51920+ }
51921+
51922+ return 1;
51923+ }
51924+ case '[':
51925+ {
51926+ int not;
51927+ char cold;
51928+
51929+ if (*n == '\0' || *n == '/')
51930+ return 1;
51931+
51932+ not = (*p == '!' || *p == '^');
51933+ if (not)
51934+ ++p;
51935+
51936+ c = *p++;
51937+ for (;;) {
51938+ unsigned char fn = (unsigned char)*n;
51939+
51940+ if (c == '\0')
51941+ return 1;
51942+ else {
51943+ if (c == fn)
51944+ goto matched;
51945+ cold = c;
51946+ c = *p++;
51947+
51948+ if (c == '-' && *p != ']') {
51949+ unsigned char cend = *p++;
51950+
51951+ if (cend == '\0')
51952+ return 1;
51953+
51954+ if (cold <= fn && fn <= cend)
51955+ goto matched;
51956+
51957+ c = *p++;
51958+ }
51959+ }
51960+
51961+ if (c == ']')
51962+ break;
51963+ }
51964+ if (!not)
51965+ return 1;
51966+ break;
51967+ matched:
51968+ while (c != ']') {
51969+ if (c == '\0')
51970+ return 1;
51971+
51972+ c = *p++;
51973+ }
51974+ if (not)
51975+ return 1;
51976+ }
51977+ break;
51978+ default:
51979+ if (c != *n)
51980+ return 1;
51981+ }
51982+
51983+ ++n;
51984+ }
51985+
51986+ if (*n == '\0')
51987+ return 0;
51988+
51989+ if (*n == '/')
51990+ return 0;
51991+
51992+ return 1;
51993+}
51994+
51995+static struct acl_object_label *
51996+chk_glob_label(struct acl_object_label *globbed,
51997+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
51998+{
51999+ struct acl_object_label *tmp;
52000+
52001+ if (*path == NULL)
52002+ *path = gr_to_filename_nolock(dentry, mnt);
52003+
52004+ tmp = globbed;
52005+
52006+ while (tmp) {
52007+ if (!glob_match(tmp->filename, *path))
52008+ return tmp;
52009+ tmp = tmp->next;
52010+ }
52011+
52012+ return NULL;
52013+}
52014+
52015+static struct acl_object_label *
52016+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52017+ const ino_t curr_ino, const dev_t curr_dev,
52018+ const struct acl_subject_label *subj, char **path, const int checkglob)
52019+{
52020+ struct acl_subject_label *tmpsubj;
52021+ struct acl_object_label *retval;
52022+ struct acl_object_label *retval2;
52023+
52024+ tmpsubj = (struct acl_subject_label *) subj;
52025+ read_lock(&gr_inode_lock);
52026+ do {
52027+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
52028+ if (retval) {
52029+ if (checkglob && retval->globbed) {
52030+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
52031+ if (retval2)
52032+ retval = retval2;
52033+ }
52034+ break;
52035+ }
52036+ } while ((tmpsubj = tmpsubj->parent_subject));
52037+ read_unlock(&gr_inode_lock);
52038+
52039+ return retval;
52040+}
52041+
52042+static __inline__ struct acl_object_label *
52043+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52044+ struct dentry *curr_dentry,
52045+ const struct acl_subject_label *subj, char **path, const int checkglob)
52046+{
52047+ int newglob = checkglob;
52048+ ino_t inode;
52049+ dev_t device;
52050+
52051+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
52052+ as we don't want a / * rule to match instead of the / object
52053+ don't do this for create lookups that call this function though, since they're looking up
52054+ on the parent and thus need globbing checks on all paths
52055+ */
52056+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
52057+ newglob = GR_NO_GLOB;
52058+
52059+ spin_lock(&curr_dentry->d_lock);
52060+ inode = curr_dentry->d_inode->i_ino;
52061+ device = __get_dev(curr_dentry);
52062+ spin_unlock(&curr_dentry->d_lock);
52063+
52064+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
52065+}
52066+
52067+static struct acl_object_label *
52068+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52069+ const struct acl_subject_label *subj, char *path, const int checkglob)
52070+{
52071+ struct dentry *dentry = (struct dentry *) l_dentry;
52072+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52073+ struct mount *real_mnt = real_mount(mnt);
52074+ struct acl_object_label *retval;
52075+ struct dentry *parent;
52076+
52077+ write_seqlock(&rename_lock);
52078+ br_read_lock(vfsmount_lock);
52079+
52080+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
52081+#ifdef CONFIG_NET
52082+ mnt == sock_mnt ||
52083+#endif
52084+#ifdef CONFIG_HUGETLBFS
52085+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
52086+#endif
52087+ /* ignore Eric Biederman */
52088+ IS_PRIVATE(l_dentry->d_inode))) {
52089+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
52090+ goto out;
52091+ }
52092+
52093+ for (;;) {
52094+ if (dentry == real_root.dentry && mnt == real_root.mnt)
52095+ break;
52096+
52097+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52098+ if (!mnt_has_parent(real_mnt))
52099+ break;
52100+
52101+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52102+ if (retval != NULL)
52103+ goto out;
52104+
52105+ dentry = real_mnt->mnt_mountpoint;
52106+ real_mnt = real_mnt->mnt_parent;
52107+ mnt = &real_mnt->mnt;
52108+ continue;
52109+ }
52110+
52111+ parent = dentry->d_parent;
52112+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52113+ if (retval != NULL)
52114+ goto out;
52115+
52116+ dentry = parent;
52117+ }
52118+
52119+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52120+
52121+ /* real_root is pinned so we don't have to hold a reference */
52122+ if (retval == NULL)
52123+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
52124+out:
52125+ br_read_unlock(vfsmount_lock);
52126+ write_sequnlock(&rename_lock);
52127+
52128+ BUG_ON(retval == NULL);
52129+
52130+ return retval;
52131+}
52132+
52133+static __inline__ struct acl_object_label *
52134+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52135+ const struct acl_subject_label *subj)
52136+{
52137+ char *path = NULL;
52138+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
52139+}
52140+
52141+static __inline__ struct acl_object_label *
52142+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52143+ const struct acl_subject_label *subj)
52144+{
52145+ char *path = NULL;
52146+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
52147+}
52148+
52149+static __inline__ struct acl_object_label *
52150+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52151+ const struct acl_subject_label *subj, char *path)
52152+{
52153+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
52154+}
52155+
52156+static struct acl_subject_label *
52157+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52158+ const struct acl_role_label *role)
52159+{
52160+ struct dentry *dentry = (struct dentry *) l_dentry;
52161+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52162+ struct mount *real_mnt = real_mount(mnt);
52163+ struct acl_subject_label *retval;
52164+ struct dentry *parent;
52165+
52166+ write_seqlock(&rename_lock);
52167+ br_read_lock(vfsmount_lock);
52168+
52169+ for (;;) {
52170+ if (dentry == real_root.dentry && mnt == real_root.mnt)
52171+ break;
52172+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52173+ if (!mnt_has_parent(real_mnt))
52174+ break;
52175+
52176+ spin_lock(&dentry->d_lock);
52177+ read_lock(&gr_inode_lock);
52178+ retval =
52179+ lookup_acl_subj_label(dentry->d_inode->i_ino,
52180+ __get_dev(dentry), role);
52181+ read_unlock(&gr_inode_lock);
52182+ spin_unlock(&dentry->d_lock);
52183+ if (retval != NULL)
52184+ goto out;
52185+
52186+ dentry = real_mnt->mnt_mountpoint;
52187+ real_mnt = real_mnt->mnt_parent;
52188+ mnt = &real_mnt->mnt;
52189+ continue;
52190+ }
52191+
52192+ spin_lock(&dentry->d_lock);
52193+ read_lock(&gr_inode_lock);
52194+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52195+ __get_dev(dentry), role);
52196+ read_unlock(&gr_inode_lock);
52197+ parent = dentry->d_parent;
52198+ spin_unlock(&dentry->d_lock);
52199+
52200+ if (retval != NULL)
52201+ goto out;
52202+
52203+ dentry = parent;
52204+ }
52205+
52206+ spin_lock(&dentry->d_lock);
52207+ read_lock(&gr_inode_lock);
52208+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52209+ __get_dev(dentry), role);
52210+ read_unlock(&gr_inode_lock);
52211+ spin_unlock(&dentry->d_lock);
52212+
52213+ if (unlikely(retval == NULL)) {
52214+ /* real_root is pinned, we don't need to hold a reference */
52215+ read_lock(&gr_inode_lock);
52216+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
52217+ __get_dev(real_root.dentry), role);
52218+ read_unlock(&gr_inode_lock);
52219+ }
52220+out:
52221+ br_read_unlock(vfsmount_lock);
52222+ write_sequnlock(&rename_lock);
52223+
52224+ BUG_ON(retval == NULL);
52225+
52226+ return retval;
52227+}
52228+
52229+static void
52230+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
52231+{
52232+ struct task_struct *task = current;
52233+ const struct cred *cred = current_cred();
52234+
52235+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52236+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52237+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52238+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
52239+
52240+ return;
52241+}
52242+
52243+static void
52244+gr_log_learn_id_change(const char type, const unsigned int real,
52245+ const unsigned int effective, const unsigned int fs)
52246+{
52247+ struct task_struct *task = current;
52248+ const struct cred *cred = current_cred();
52249+
52250+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
52251+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52252+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52253+ type, real, effective, fs, &task->signal->saved_ip);
52254+
52255+ return;
52256+}
52257+
52258+__u32
52259+gr_search_file(const struct dentry * dentry, const __u32 mode,
52260+ const struct vfsmount * mnt)
52261+{
52262+ __u32 retval = mode;
52263+ struct acl_subject_label *curracl;
52264+ struct acl_object_label *currobj;
52265+
52266+ if (unlikely(!(gr_status & GR_READY)))
52267+ return (mode & ~GR_AUDITS);
52268+
52269+ curracl = current->acl;
52270+
52271+ currobj = chk_obj_label(dentry, mnt, curracl);
52272+ retval = currobj->mode & mode;
52273+
52274+ /* if we're opening a specified transfer file for writing
52275+ (e.g. /dev/initctl), then transfer our role to init
52276+ */
52277+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
52278+ current->role->roletype & GR_ROLE_PERSIST)) {
52279+ struct task_struct *task = init_pid_ns.child_reaper;
52280+
52281+ if (task->role != current->role) {
52282+ task->acl_sp_role = 0;
52283+ task->acl_role_id = current->acl_role_id;
52284+ task->role = current->role;
52285+ rcu_read_lock();
52286+ read_lock(&grsec_exec_file_lock);
52287+ gr_apply_subject_to_task(task);
52288+ read_unlock(&grsec_exec_file_lock);
52289+ rcu_read_unlock();
52290+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
52291+ }
52292+ }
52293+
52294+ if (unlikely
52295+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
52296+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
52297+ __u32 new_mode = mode;
52298+
52299+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52300+
52301+ retval = new_mode;
52302+
52303+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
52304+ new_mode |= GR_INHERIT;
52305+
52306+ if (!(mode & GR_NOLEARN))
52307+ gr_log_learn(dentry, mnt, new_mode);
52308+ }
52309+
52310+ return retval;
52311+}
52312+
52313+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
52314+ const struct dentry *parent,
52315+ const struct vfsmount *mnt)
52316+{
52317+ struct name_entry *match;
52318+ struct acl_object_label *matchpo;
52319+ struct acl_subject_label *curracl;
52320+ char *path;
52321+
52322+ if (unlikely(!(gr_status & GR_READY)))
52323+ return NULL;
52324+
52325+ preempt_disable();
52326+ path = gr_to_filename_rbac(new_dentry, mnt);
52327+ match = lookup_name_entry_create(path);
52328+
52329+ curracl = current->acl;
52330+
52331+ if (match) {
52332+ read_lock(&gr_inode_lock);
52333+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
52334+ read_unlock(&gr_inode_lock);
52335+
52336+ if (matchpo) {
52337+ preempt_enable();
52338+ return matchpo;
52339+ }
52340+ }
52341+
52342+ // lookup parent
52343+
52344+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
52345+
52346+ preempt_enable();
52347+ return matchpo;
52348+}
52349+
52350+__u32
52351+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
52352+ const struct vfsmount * mnt, const __u32 mode)
52353+{
52354+ struct acl_object_label *matchpo;
52355+ __u32 retval;
52356+
52357+ if (unlikely(!(gr_status & GR_READY)))
52358+ return (mode & ~GR_AUDITS);
52359+
52360+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
52361+
52362+ retval = matchpo->mode & mode;
52363+
52364+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
52365+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52366+ __u32 new_mode = mode;
52367+
52368+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52369+
52370+ gr_log_learn(new_dentry, mnt, new_mode);
52371+ return new_mode;
52372+ }
52373+
52374+ return retval;
52375+}
52376+
52377+__u32
52378+gr_check_link(const struct dentry * new_dentry,
52379+ const struct dentry * parent_dentry,
52380+ const struct vfsmount * parent_mnt,
52381+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
52382+{
52383+ struct acl_object_label *obj;
52384+ __u32 oldmode, newmode;
52385+ __u32 needmode;
52386+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
52387+ GR_DELETE | GR_INHERIT;
52388+
52389+ if (unlikely(!(gr_status & GR_READY)))
52390+ return (GR_CREATE | GR_LINK);
52391+
52392+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
52393+ oldmode = obj->mode;
52394+
52395+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
52396+ newmode = obj->mode;
52397+
52398+ needmode = newmode & checkmodes;
52399+
52400+ // old name for hardlink must have at least the permissions of the new name
52401+ if ((oldmode & needmode) != needmode)
52402+ goto bad;
52403+
52404+ // if old name had restrictions/auditing, make sure the new name does as well
52405+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
52406+
52407+ // don't allow hardlinking of suid/sgid files without permission
52408+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52409+ needmode |= GR_SETID;
52410+
52411+ if ((newmode & needmode) != needmode)
52412+ goto bad;
52413+
52414+ // enforce minimum permissions
52415+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
52416+ return newmode;
52417+bad:
52418+ needmode = oldmode;
52419+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52420+ needmode |= GR_SETID;
52421+
52422+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
52423+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
52424+ return (GR_CREATE | GR_LINK);
52425+ } else if (newmode & GR_SUPPRESS)
52426+ return GR_SUPPRESS;
52427+ else
52428+ return 0;
52429+}
52430+
52431+int
52432+gr_check_hidden_task(const struct task_struct *task)
52433+{
52434+ if (unlikely(!(gr_status & GR_READY)))
52435+ return 0;
52436+
52437+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
52438+ return 1;
52439+
52440+ return 0;
52441+}
52442+
52443+int
52444+gr_check_protected_task(const struct task_struct *task)
52445+{
52446+ if (unlikely(!(gr_status & GR_READY) || !task))
52447+ return 0;
52448+
52449+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52450+ task->acl != current->acl)
52451+ return 1;
52452+
52453+ return 0;
52454+}
52455+
52456+int
52457+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52458+{
52459+ struct task_struct *p;
52460+ int ret = 0;
52461+
52462+ if (unlikely(!(gr_status & GR_READY) || !pid))
52463+ return ret;
52464+
52465+ read_lock(&tasklist_lock);
52466+ do_each_pid_task(pid, type, p) {
52467+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52468+ p->acl != current->acl) {
52469+ ret = 1;
52470+ goto out;
52471+ }
52472+ } while_each_pid_task(pid, type, p);
52473+out:
52474+ read_unlock(&tasklist_lock);
52475+
52476+ return ret;
52477+}
52478+
52479+void
52480+gr_copy_label(struct task_struct *tsk)
52481+{
52482+ /* plain copying of fields is already done by dup_task_struct */
52483+ tsk->signal->used_accept = 0;
52484+ tsk->acl_sp_role = 0;
52485+ //tsk->acl_role_id = current->acl_role_id;
52486+ //tsk->acl = current->acl;
52487+ //tsk->role = current->role;
52488+ tsk->signal->curr_ip = current->signal->curr_ip;
52489+ tsk->signal->saved_ip = current->signal->saved_ip;
52490+ if (current->exec_file)
52491+ get_file(current->exec_file);
52492+ //tsk->exec_file = current->exec_file;
52493+ //tsk->is_writable = current->is_writable;
52494+ if (unlikely(current->signal->used_accept)) {
52495+ current->signal->curr_ip = 0;
52496+ current->signal->saved_ip = 0;
52497+ }
52498+
52499+ return;
52500+}
52501+
52502+static void
52503+gr_set_proc_res(struct task_struct *task)
52504+{
52505+ struct acl_subject_label *proc;
52506+ unsigned short i;
52507+
52508+ proc = task->acl;
52509+
52510+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
52511+ return;
52512+
52513+ for (i = 0; i < RLIM_NLIMITS; i++) {
52514+ if (!(proc->resmask & (1 << i)))
52515+ continue;
52516+
52517+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
52518+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
52519+ }
52520+
52521+ return;
52522+}
52523+
52524+extern int __gr_process_user_ban(struct user_struct *user);
52525+
52526+int
52527+gr_check_user_change(int real, int effective, int fs)
52528+{
52529+ unsigned int i;
52530+ __u16 num;
52531+ uid_t *uidlist;
52532+ int curuid;
52533+ int realok = 0;
52534+ int effectiveok = 0;
52535+ int fsok = 0;
52536+
52537+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52538+ struct user_struct *user;
52539+
52540+ if (real == -1)
52541+ goto skipit;
52542+
52543+ user = find_user(real);
52544+ if (user == NULL)
52545+ goto skipit;
52546+
52547+ if (__gr_process_user_ban(user)) {
52548+ /* for find_user */
52549+ free_uid(user);
52550+ return 1;
52551+ }
52552+
52553+ /* for find_user */
52554+ free_uid(user);
52555+
52556+skipit:
52557+#endif
52558+
52559+ if (unlikely(!(gr_status & GR_READY)))
52560+ return 0;
52561+
52562+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52563+ gr_log_learn_id_change('u', real, effective, fs);
52564+
52565+ num = current->acl->user_trans_num;
52566+ uidlist = current->acl->user_transitions;
52567+
52568+ if (uidlist == NULL)
52569+ return 0;
52570+
52571+ if (real == -1)
52572+ realok = 1;
52573+ if (effective == -1)
52574+ effectiveok = 1;
52575+ if (fs == -1)
52576+ fsok = 1;
52577+
52578+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
52579+ for (i = 0; i < num; i++) {
52580+ curuid = (int)uidlist[i];
52581+ if (real == curuid)
52582+ realok = 1;
52583+ if (effective == curuid)
52584+ effectiveok = 1;
52585+ if (fs == curuid)
52586+ fsok = 1;
52587+ }
52588+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
52589+ for (i = 0; i < num; i++) {
52590+ curuid = (int)uidlist[i];
52591+ if (real == curuid)
52592+ break;
52593+ if (effective == curuid)
52594+ break;
52595+ if (fs == curuid)
52596+ break;
52597+ }
52598+ /* not in deny list */
52599+ if (i == num) {
52600+ realok = 1;
52601+ effectiveok = 1;
52602+ fsok = 1;
52603+ }
52604+ }
52605+
52606+ if (realok && effectiveok && fsok)
52607+ return 0;
52608+ else {
52609+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52610+ return 1;
52611+ }
52612+}
52613+
52614+int
52615+gr_check_group_change(int real, int effective, int fs)
52616+{
52617+ unsigned int i;
52618+ __u16 num;
52619+ gid_t *gidlist;
52620+ int curgid;
52621+ int realok = 0;
52622+ int effectiveok = 0;
52623+ int fsok = 0;
52624+
52625+ if (unlikely(!(gr_status & GR_READY)))
52626+ return 0;
52627+
52628+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52629+ gr_log_learn_id_change('g', real, effective, fs);
52630+
52631+ num = current->acl->group_trans_num;
52632+ gidlist = current->acl->group_transitions;
52633+
52634+ if (gidlist == NULL)
52635+ return 0;
52636+
52637+ if (real == -1)
52638+ realok = 1;
52639+ if (effective == -1)
52640+ effectiveok = 1;
52641+ if (fs == -1)
52642+ fsok = 1;
52643+
52644+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
52645+ for (i = 0; i < num; i++) {
52646+ curgid = (int)gidlist[i];
52647+ if (real == curgid)
52648+ realok = 1;
52649+ if (effective == curgid)
52650+ effectiveok = 1;
52651+ if (fs == curgid)
52652+ fsok = 1;
52653+ }
52654+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
52655+ for (i = 0; i < num; i++) {
52656+ curgid = (int)gidlist[i];
52657+ if (real == curgid)
52658+ break;
52659+ if (effective == curgid)
52660+ break;
52661+ if (fs == curgid)
52662+ break;
52663+ }
52664+ /* not in deny list */
52665+ if (i == num) {
52666+ realok = 1;
52667+ effectiveok = 1;
52668+ fsok = 1;
52669+ }
52670+ }
52671+
52672+ if (realok && effectiveok && fsok)
52673+ return 0;
52674+ else {
52675+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52676+ return 1;
52677+ }
52678+}
52679+
52680+extern int gr_acl_is_capable(const int cap);
52681+
52682+void
52683+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
52684+{
52685+ struct acl_role_label *role = task->role;
52686+ struct acl_subject_label *subj = NULL;
52687+ struct acl_object_label *obj;
52688+ struct file *filp;
52689+
52690+ if (unlikely(!(gr_status & GR_READY)))
52691+ return;
52692+
52693+ filp = task->exec_file;
52694+
52695+ /* kernel process, we'll give them the kernel role */
52696+ if (unlikely(!filp)) {
52697+ task->role = kernel_role;
52698+ task->acl = kernel_role->root_label;
52699+ return;
52700+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
52701+ role = lookup_acl_role_label(task, uid, gid);
52702+
52703+ /* don't change the role if we're not a privileged process */
52704+ if (role && task->role != role &&
52705+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
52706+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
52707+ return;
52708+
52709+ /* perform subject lookup in possibly new role
52710+ we can use this result below in the case where role == task->role
52711+ */
52712+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
52713+
52714+ /* if we changed uid/gid, but result in the same role
52715+ and are using inheritance, don't lose the inherited subject
52716+ if current subject is other than what normal lookup
52717+ would result in, we arrived via inheritance, don't
52718+ lose subject
52719+ */
52720+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
52721+ (subj == task->acl)))
52722+ task->acl = subj;
52723+
52724+ task->role = role;
52725+
52726+ task->is_writable = 0;
52727+
52728+ /* ignore additional mmap checks for processes that are writable
52729+ by the default ACL */
52730+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52731+ if (unlikely(obj->mode & GR_WRITE))
52732+ task->is_writable = 1;
52733+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
52734+ if (unlikely(obj->mode & GR_WRITE))
52735+ task->is_writable = 1;
52736+
52737+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52738+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52739+#endif
52740+
52741+ gr_set_proc_res(task);
52742+
52743+ return;
52744+}
52745+
52746+int
52747+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
52748+ const int unsafe_flags)
52749+{
52750+ struct task_struct *task = current;
52751+ struct acl_subject_label *newacl;
52752+ struct acl_object_label *obj;
52753+ __u32 retmode;
52754+
52755+ if (unlikely(!(gr_status & GR_READY)))
52756+ return 0;
52757+
52758+ newacl = chk_subj_label(dentry, mnt, task->role);
52759+
52760+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
52761+ did an exec
52762+ */
52763+ rcu_read_lock();
52764+ read_lock(&tasklist_lock);
52765+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
52766+ (task->parent->acl->mode & GR_POVERRIDE))) {
52767+ read_unlock(&tasklist_lock);
52768+ rcu_read_unlock();
52769+ goto skip_check;
52770+ }
52771+ read_unlock(&tasklist_lock);
52772+ rcu_read_unlock();
52773+
52774+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
52775+ !(task->role->roletype & GR_ROLE_GOD) &&
52776+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
52777+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52778+ if (unsafe_flags & LSM_UNSAFE_SHARE)
52779+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
52780+ else
52781+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
52782+ return -EACCES;
52783+ }
52784+
52785+skip_check:
52786+
52787+ obj = chk_obj_label(dentry, mnt, task->acl);
52788+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
52789+
52790+ if (!(task->acl->mode & GR_INHERITLEARN) &&
52791+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
52792+ if (obj->nested)
52793+ task->acl = obj->nested;
52794+ else
52795+ task->acl = newacl;
52796+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
52797+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
52798+
52799+ task->is_writable = 0;
52800+
52801+ /* ignore additional mmap checks for processes that are writable
52802+ by the default ACL */
52803+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
52804+ if (unlikely(obj->mode & GR_WRITE))
52805+ task->is_writable = 1;
52806+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
52807+ if (unlikely(obj->mode & GR_WRITE))
52808+ task->is_writable = 1;
52809+
52810+ gr_set_proc_res(task);
52811+
52812+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52813+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52814+#endif
52815+ return 0;
52816+}
52817+
52818+/* always called with valid inodev ptr */
52819+static void
52820+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
52821+{
52822+ struct acl_object_label *matchpo;
52823+ struct acl_subject_label *matchps;
52824+ struct acl_subject_label *subj;
52825+ struct acl_role_label *role;
52826+ unsigned int x;
52827+
52828+ FOR_EACH_ROLE_START(role)
52829+ FOR_EACH_SUBJECT_START(role, subj, x)
52830+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
52831+ matchpo->mode |= GR_DELETED;
52832+ FOR_EACH_SUBJECT_END(subj,x)
52833+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
52834+ if (subj->inode == ino && subj->device == dev)
52835+ subj->mode |= GR_DELETED;
52836+ FOR_EACH_NESTED_SUBJECT_END(subj)
52837+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
52838+ matchps->mode |= GR_DELETED;
52839+ FOR_EACH_ROLE_END(role)
52840+
52841+ inodev->nentry->deleted = 1;
52842+
52843+ return;
52844+}
52845+
52846+void
52847+gr_handle_delete(const ino_t ino, const dev_t dev)
52848+{
52849+ struct inodev_entry *inodev;
52850+
52851+ if (unlikely(!(gr_status & GR_READY)))
52852+ return;
52853+
52854+ write_lock(&gr_inode_lock);
52855+ inodev = lookup_inodev_entry(ino, dev);
52856+ if (inodev != NULL)
52857+ do_handle_delete(inodev, ino, dev);
52858+ write_unlock(&gr_inode_lock);
52859+
52860+ return;
52861+}
52862+
52863+static void
52864+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
52865+ const ino_t newinode, const dev_t newdevice,
52866+ struct acl_subject_label *subj)
52867+{
52868+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
52869+ struct acl_object_label *match;
52870+
52871+ match = subj->obj_hash[index];
52872+
52873+ while (match && (match->inode != oldinode ||
52874+ match->device != olddevice ||
52875+ !(match->mode & GR_DELETED)))
52876+ match = match->next;
52877+
52878+ if (match && (match->inode == oldinode)
52879+ && (match->device == olddevice)
52880+ && (match->mode & GR_DELETED)) {
52881+ if (match->prev == NULL) {
52882+ subj->obj_hash[index] = match->next;
52883+ if (match->next != NULL)
52884+ match->next->prev = NULL;
52885+ } else {
52886+ match->prev->next = match->next;
52887+ if (match->next != NULL)
52888+ match->next->prev = match->prev;
52889+ }
52890+ match->prev = NULL;
52891+ match->next = NULL;
52892+ match->inode = newinode;
52893+ match->device = newdevice;
52894+ match->mode &= ~GR_DELETED;
52895+
52896+ insert_acl_obj_label(match, subj);
52897+ }
52898+
52899+ return;
52900+}
52901+
52902+static void
52903+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
52904+ const ino_t newinode, const dev_t newdevice,
52905+ struct acl_role_label *role)
52906+{
52907+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
52908+ struct acl_subject_label *match;
52909+
52910+ match = role->subj_hash[index];
52911+
52912+ while (match && (match->inode != oldinode ||
52913+ match->device != olddevice ||
52914+ !(match->mode & GR_DELETED)))
52915+ match = match->next;
52916+
52917+ if (match && (match->inode == oldinode)
52918+ && (match->device == olddevice)
52919+ && (match->mode & GR_DELETED)) {
52920+ if (match->prev == NULL) {
52921+ role->subj_hash[index] = match->next;
52922+ if (match->next != NULL)
52923+ match->next->prev = NULL;
52924+ } else {
52925+ match->prev->next = match->next;
52926+ if (match->next != NULL)
52927+ match->next->prev = match->prev;
52928+ }
52929+ match->prev = NULL;
52930+ match->next = NULL;
52931+ match->inode = newinode;
52932+ match->device = newdevice;
52933+ match->mode &= ~GR_DELETED;
52934+
52935+ insert_acl_subj_label(match, role);
52936+ }
52937+
52938+ return;
52939+}
52940+
52941+static void
52942+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
52943+ const ino_t newinode, const dev_t newdevice)
52944+{
52945+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
52946+ struct inodev_entry *match;
52947+
52948+ match = inodev_set.i_hash[index];
52949+
52950+ while (match && (match->nentry->inode != oldinode ||
52951+ match->nentry->device != olddevice || !match->nentry->deleted))
52952+ match = match->next;
52953+
52954+ if (match && (match->nentry->inode == oldinode)
52955+ && (match->nentry->device == olddevice) &&
52956+ match->nentry->deleted) {
52957+ if (match->prev == NULL) {
52958+ inodev_set.i_hash[index] = match->next;
52959+ if (match->next != NULL)
52960+ match->next->prev = NULL;
52961+ } else {
52962+ match->prev->next = match->next;
52963+ if (match->next != NULL)
52964+ match->next->prev = match->prev;
52965+ }
52966+ match->prev = NULL;
52967+ match->next = NULL;
52968+ match->nentry->inode = newinode;
52969+ match->nentry->device = newdevice;
52970+ match->nentry->deleted = 0;
52971+
52972+ insert_inodev_entry(match);
52973+ }
52974+
52975+ return;
52976+}
52977+
52978+static void
52979+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
52980+{
52981+ struct acl_subject_label *subj;
52982+ struct acl_role_label *role;
52983+ unsigned int x;
52984+
52985+ FOR_EACH_ROLE_START(role)
52986+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
52987+
52988+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
52989+ if ((subj->inode == ino) && (subj->device == dev)) {
52990+ subj->inode = ino;
52991+ subj->device = dev;
52992+ }
52993+ FOR_EACH_NESTED_SUBJECT_END(subj)
52994+ FOR_EACH_SUBJECT_START(role, subj, x)
52995+ update_acl_obj_label(matchn->inode, matchn->device,
52996+ ino, dev, subj);
52997+ FOR_EACH_SUBJECT_END(subj,x)
52998+ FOR_EACH_ROLE_END(role)
52999+
53000+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
53001+
53002+ return;
53003+}
53004+
53005+static void
53006+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
53007+ const struct vfsmount *mnt)
53008+{
53009+ ino_t ino = dentry->d_inode->i_ino;
53010+ dev_t dev = __get_dev(dentry);
53011+
53012+ __do_handle_create(matchn, ino, dev);
53013+
53014+ return;
53015+}
53016+
53017+void
53018+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53019+{
53020+ struct name_entry *matchn;
53021+
53022+ if (unlikely(!(gr_status & GR_READY)))
53023+ return;
53024+
53025+ preempt_disable();
53026+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
53027+
53028+ if (unlikely((unsigned long)matchn)) {
53029+ write_lock(&gr_inode_lock);
53030+ do_handle_create(matchn, dentry, mnt);
53031+ write_unlock(&gr_inode_lock);
53032+ }
53033+ preempt_enable();
53034+
53035+ return;
53036+}
53037+
53038+void
53039+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
53040+{
53041+ struct name_entry *matchn;
53042+
53043+ if (unlikely(!(gr_status & GR_READY)))
53044+ return;
53045+
53046+ preempt_disable();
53047+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
53048+
53049+ if (unlikely((unsigned long)matchn)) {
53050+ write_lock(&gr_inode_lock);
53051+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
53052+ write_unlock(&gr_inode_lock);
53053+ }
53054+ preempt_enable();
53055+
53056+ return;
53057+}
53058+
53059+void
53060+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53061+ struct dentry *old_dentry,
53062+ struct dentry *new_dentry,
53063+ struct vfsmount *mnt, const __u8 replace)
53064+{
53065+ struct name_entry *matchn;
53066+ struct inodev_entry *inodev;
53067+ struct inode *inode = new_dentry->d_inode;
53068+ ino_t old_ino = old_dentry->d_inode->i_ino;
53069+ dev_t old_dev = __get_dev(old_dentry);
53070+
53071+ /* vfs_rename swaps the name and parent link for old_dentry and
53072+ new_dentry
53073+ at this point, old_dentry has the new name, parent link, and inode
53074+ for the renamed file
53075+ if a file is being replaced by a rename, new_dentry has the inode
53076+ and name for the replaced file
53077+ */
53078+
53079+ if (unlikely(!(gr_status & GR_READY)))
53080+ return;
53081+
53082+ preempt_disable();
53083+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
53084+
53085+ /* we wouldn't have to check d_inode if it weren't for
53086+ NFS silly-renaming
53087+ */
53088+
53089+ write_lock(&gr_inode_lock);
53090+ if (unlikely(replace && inode)) {
53091+ ino_t new_ino = inode->i_ino;
53092+ dev_t new_dev = __get_dev(new_dentry);
53093+
53094+ inodev = lookup_inodev_entry(new_ino, new_dev);
53095+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
53096+ do_handle_delete(inodev, new_ino, new_dev);
53097+ }
53098+
53099+ inodev = lookup_inodev_entry(old_ino, old_dev);
53100+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
53101+ do_handle_delete(inodev, old_ino, old_dev);
53102+
53103+ if (unlikely((unsigned long)matchn))
53104+ do_handle_create(matchn, old_dentry, mnt);
53105+
53106+ write_unlock(&gr_inode_lock);
53107+ preempt_enable();
53108+
53109+ return;
53110+}
53111+
53112+static int
53113+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
53114+ unsigned char **sum)
53115+{
53116+ struct acl_role_label *r;
53117+ struct role_allowed_ip *ipp;
53118+ struct role_transition *trans;
53119+ unsigned int i;
53120+ int found = 0;
53121+ u32 curr_ip = current->signal->curr_ip;
53122+
53123+ current->signal->saved_ip = curr_ip;
53124+
53125+ /* check transition table */
53126+
53127+ for (trans = current->role->transitions; trans; trans = trans->next) {
53128+ if (!strcmp(rolename, trans->rolename)) {
53129+ found = 1;
53130+ break;
53131+ }
53132+ }
53133+
53134+ if (!found)
53135+ return 0;
53136+
53137+ /* handle special roles that do not require authentication
53138+ and check ip */
53139+
53140+ FOR_EACH_ROLE_START(r)
53141+ if (!strcmp(rolename, r->rolename) &&
53142+ (r->roletype & GR_ROLE_SPECIAL)) {
53143+ found = 0;
53144+ if (r->allowed_ips != NULL) {
53145+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
53146+ if ((ntohl(curr_ip) & ipp->netmask) ==
53147+ (ntohl(ipp->addr) & ipp->netmask))
53148+ found = 1;
53149+ }
53150+ } else
53151+ found = 2;
53152+ if (!found)
53153+ return 0;
53154+
53155+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
53156+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
53157+ *salt = NULL;
53158+ *sum = NULL;
53159+ return 1;
53160+ }
53161+ }
53162+ FOR_EACH_ROLE_END(r)
53163+
53164+ for (i = 0; i < num_sprole_pws; i++) {
53165+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
53166+ *salt = acl_special_roles[i]->salt;
53167+ *sum = acl_special_roles[i]->sum;
53168+ return 1;
53169+ }
53170+ }
53171+
53172+ return 0;
53173+}
53174+
53175+static void
53176+assign_special_role(char *rolename)
53177+{
53178+ struct acl_object_label *obj;
53179+ struct acl_role_label *r;
53180+ struct acl_role_label *assigned = NULL;
53181+ struct task_struct *tsk;
53182+ struct file *filp;
53183+
53184+ FOR_EACH_ROLE_START(r)
53185+ if (!strcmp(rolename, r->rolename) &&
53186+ (r->roletype & GR_ROLE_SPECIAL)) {
53187+ assigned = r;
53188+ break;
53189+ }
53190+ FOR_EACH_ROLE_END(r)
53191+
53192+ if (!assigned)
53193+ return;
53194+
53195+ read_lock(&tasklist_lock);
53196+ read_lock(&grsec_exec_file_lock);
53197+
53198+ tsk = current->real_parent;
53199+ if (tsk == NULL)
53200+ goto out_unlock;
53201+
53202+ filp = tsk->exec_file;
53203+ if (filp == NULL)
53204+ goto out_unlock;
53205+
53206+ tsk->is_writable = 0;
53207+
53208+ tsk->acl_sp_role = 1;
53209+ tsk->acl_role_id = ++acl_sp_role_value;
53210+ tsk->role = assigned;
53211+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
53212+
53213+ /* ignore additional mmap checks for processes that are writable
53214+ by the default ACL */
53215+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53216+ if (unlikely(obj->mode & GR_WRITE))
53217+ tsk->is_writable = 1;
53218+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
53219+ if (unlikely(obj->mode & GR_WRITE))
53220+ tsk->is_writable = 1;
53221+
53222+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53223+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
53224+#endif
53225+
53226+out_unlock:
53227+ read_unlock(&grsec_exec_file_lock);
53228+ read_unlock(&tasklist_lock);
53229+ return;
53230+}
53231+
53232+int gr_check_secure_terminal(struct task_struct *task)
53233+{
53234+ struct task_struct *p, *p2, *p3;
53235+ struct files_struct *files;
53236+ struct fdtable *fdt;
53237+ struct file *our_file = NULL, *file;
53238+ int i;
53239+
53240+ if (task->signal->tty == NULL)
53241+ return 1;
53242+
53243+ files = get_files_struct(task);
53244+ if (files != NULL) {
53245+ rcu_read_lock();
53246+ fdt = files_fdtable(files);
53247+ for (i=0; i < fdt->max_fds; i++) {
53248+ file = fcheck_files(files, i);
53249+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
53250+ get_file(file);
53251+ our_file = file;
53252+ }
53253+ }
53254+ rcu_read_unlock();
53255+ put_files_struct(files);
53256+ }
53257+
53258+ if (our_file == NULL)
53259+ return 1;
53260+
53261+ read_lock(&tasklist_lock);
53262+ do_each_thread(p2, p) {
53263+ files = get_files_struct(p);
53264+ if (files == NULL ||
53265+ (p->signal && p->signal->tty == task->signal->tty)) {
53266+ if (files != NULL)
53267+ put_files_struct(files);
53268+ continue;
53269+ }
53270+ rcu_read_lock();
53271+ fdt = files_fdtable(files);
53272+ for (i=0; i < fdt->max_fds; i++) {
53273+ file = fcheck_files(files, i);
53274+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
53275+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
53276+ p3 = task;
53277+ while (p3->pid > 0) {
53278+ if (p3 == p)
53279+ break;
53280+ p3 = p3->real_parent;
53281+ }
53282+ if (p3 == p)
53283+ break;
53284+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
53285+ gr_handle_alertkill(p);
53286+ rcu_read_unlock();
53287+ put_files_struct(files);
53288+ read_unlock(&tasklist_lock);
53289+ fput(our_file);
53290+ return 0;
53291+ }
53292+ }
53293+ rcu_read_unlock();
53294+ put_files_struct(files);
53295+ } while_each_thread(p2, p);
53296+ read_unlock(&tasklist_lock);
53297+
53298+ fput(our_file);
53299+ return 1;
53300+}
53301+
53302+ssize_t
53303+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
53304+{
53305+ struct gr_arg_wrapper uwrap;
53306+ unsigned char *sprole_salt = NULL;
53307+ unsigned char *sprole_sum = NULL;
53308+ int error = sizeof (struct gr_arg_wrapper);
53309+ int error2 = 0;
53310+
53311+ mutex_lock(&gr_dev_mutex);
53312+
53313+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
53314+ error = -EPERM;
53315+ goto out;
53316+ }
53317+
53318+ if (count != sizeof (struct gr_arg_wrapper)) {
53319+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
53320+ error = -EINVAL;
53321+ goto out;
53322+ }
53323+
53324+
53325+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
53326+ gr_auth_expires = 0;
53327+ gr_auth_attempts = 0;
53328+ }
53329+
53330+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
53331+ error = -EFAULT;
53332+ goto out;
53333+ }
53334+
53335+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
53336+ error = -EINVAL;
53337+ goto out;
53338+ }
53339+
53340+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
53341+ error = -EFAULT;
53342+ goto out;
53343+ }
53344+
53345+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53346+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53347+ time_after(gr_auth_expires, get_seconds())) {
53348+ error = -EBUSY;
53349+ goto out;
53350+ }
53351+
53352+ /* if non-root trying to do anything other than use a special role,
53353+ do not attempt authentication, do not count towards authentication
53354+ locking
53355+ */
53356+
53357+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
53358+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53359+ current_uid()) {
53360+ error = -EPERM;
53361+ goto out;
53362+ }
53363+
53364+ /* ensure pw and special role name are null terminated */
53365+
53366+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
53367+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
53368+
53369+ /* Okay.
53370+ * We have our enough of the argument structure..(we have yet
53371+ * to copy_from_user the tables themselves) . Copy the tables
53372+ * only if we need them, i.e. for loading operations. */
53373+
53374+ switch (gr_usermode->mode) {
53375+ case GR_STATUS:
53376+ if (gr_status & GR_READY) {
53377+ error = 1;
53378+ if (!gr_check_secure_terminal(current))
53379+ error = 3;
53380+ } else
53381+ error = 2;
53382+ goto out;
53383+ case GR_SHUTDOWN:
53384+ if ((gr_status & GR_READY)
53385+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53386+ pax_open_kernel();
53387+ gr_status &= ~GR_READY;
53388+ pax_close_kernel();
53389+
53390+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
53391+ free_variables();
53392+ memset(gr_usermode, 0, sizeof (struct gr_arg));
53393+ memset(gr_system_salt, 0, GR_SALT_LEN);
53394+ memset(gr_system_sum, 0, GR_SHA_LEN);
53395+ } else if (gr_status & GR_READY) {
53396+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
53397+ error = -EPERM;
53398+ } else {
53399+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
53400+ error = -EAGAIN;
53401+ }
53402+ break;
53403+ case GR_ENABLE:
53404+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
53405+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
53406+ else {
53407+ if (gr_status & GR_READY)
53408+ error = -EAGAIN;
53409+ else
53410+ error = error2;
53411+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
53412+ }
53413+ break;
53414+ case GR_RELOAD:
53415+ if (!(gr_status & GR_READY)) {
53416+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
53417+ error = -EAGAIN;
53418+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53419+ preempt_disable();
53420+
53421+ pax_open_kernel();
53422+ gr_status &= ~GR_READY;
53423+ pax_close_kernel();
53424+
53425+ free_variables();
53426+ if (!(error2 = gracl_init(gr_usermode))) {
53427+ preempt_enable();
53428+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
53429+ } else {
53430+ preempt_enable();
53431+ error = error2;
53432+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53433+ }
53434+ } else {
53435+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53436+ error = -EPERM;
53437+ }
53438+ break;
53439+ case GR_SEGVMOD:
53440+ if (unlikely(!(gr_status & GR_READY))) {
53441+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
53442+ error = -EAGAIN;
53443+ break;
53444+ }
53445+
53446+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53447+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
53448+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
53449+ struct acl_subject_label *segvacl;
53450+ segvacl =
53451+ lookup_acl_subj_label(gr_usermode->segv_inode,
53452+ gr_usermode->segv_device,
53453+ current->role);
53454+ if (segvacl) {
53455+ segvacl->crashes = 0;
53456+ segvacl->expires = 0;
53457+ }
53458+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
53459+ gr_remove_uid(gr_usermode->segv_uid);
53460+ }
53461+ } else {
53462+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
53463+ error = -EPERM;
53464+ }
53465+ break;
53466+ case GR_SPROLE:
53467+ case GR_SPROLEPAM:
53468+ if (unlikely(!(gr_status & GR_READY))) {
53469+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
53470+ error = -EAGAIN;
53471+ break;
53472+ }
53473+
53474+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
53475+ current->role->expires = 0;
53476+ current->role->auth_attempts = 0;
53477+ }
53478+
53479+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53480+ time_after(current->role->expires, get_seconds())) {
53481+ error = -EBUSY;
53482+ goto out;
53483+ }
53484+
53485+ if (lookup_special_role_auth
53486+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
53487+ && ((!sprole_salt && !sprole_sum)
53488+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
53489+ char *p = "";
53490+ assign_special_role(gr_usermode->sp_role);
53491+ read_lock(&tasklist_lock);
53492+ if (current->real_parent)
53493+ p = current->real_parent->role->rolename;
53494+ read_unlock(&tasklist_lock);
53495+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
53496+ p, acl_sp_role_value);
53497+ } else {
53498+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
53499+ error = -EPERM;
53500+ if(!(current->role->auth_attempts++))
53501+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53502+
53503+ goto out;
53504+ }
53505+ break;
53506+ case GR_UNSPROLE:
53507+ if (unlikely(!(gr_status & GR_READY))) {
53508+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
53509+ error = -EAGAIN;
53510+ break;
53511+ }
53512+
53513+ if (current->role->roletype & GR_ROLE_SPECIAL) {
53514+ char *p = "";
53515+ int i = 0;
53516+
53517+ read_lock(&tasklist_lock);
53518+ if (current->real_parent) {
53519+ p = current->real_parent->role->rolename;
53520+ i = current->real_parent->acl_role_id;
53521+ }
53522+ read_unlock(&tasklist_lock);
53523+
53524+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
53525+ gr_set_acls(1);
53526+ } else {
53527+ error = -EPERM;
53528+ goto out;
53529+ }
53530+ break;
53531+ default:
53532+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
53533+ error = -EINVAL;
53534+ break;
53535+ }
53536+
53537+ if (error != -EPERM)
53538+ goto out;
53539+
53540+ if(!(gr_auth_attempts++))
53541+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53542+
53543+ out:
53544+ mutex_unlock(&gr_dev_mutex);
53545+ return error;
53546+}
53547+
53548+/* must be called with
53549+ rcu_read_lock();
53550+ read_lock(&tasklist_lock);
53551+ read_lock(&grsec_exec_file_lock);
53552+*/
53553+int gr_apply_subject_to_task(struct task_struct *task)
53554+{
53555+ struct acl_object_label *obj;
53556+ char *tmpname;
53557+ struct acl_subject_label *tmpsubj;
53558+ struct file *filp;
53559+ struct name_entry *nmatch;
53560+
53561+ filp = task->exec_file;
53562+ if (filp == NULL)
53563+ return 0;
53564+
53565+ /* the following is to apply the correct subject
53566+ on binaries running when the RBAC system
53567+ is enabled, when the binaries have been
53568+ replaced or deleted since their execution
53569+ -----
53570+ when the RBAC system starts, the inode/dev
53571+ from exec_file will be one the RBAC system
53572+ is unaware of. It only knows the inode/dev
53573+ of the present file on disk, or the absence
53574+ of it.
53575+ */
53576+ preempt_disable();
53577+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
53578+
53579+ nmatch = lookup_name_entry(tmpname);
53580+ preempt_enable();
53581+ tmpsubj = NULL;
53582+ if (nmatch) {
53583+ if (nmatch->deleted)
53584+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
53585+ else
53586+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
53587+ if (tmpsubj != NULL)
53588+ task->acl = tmpsubj;
53589+ }
53590+ if (tmpsubj == NULL)
53591+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
53592+ task->role);
53593+ if (task->acl) {
53594+ task->is_writable = 0;
53595+ /* ignore additional mmap checks for processes that are writable
53596+ by the default ACL */
53597+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53598+ if (unlikely(obj->mode & GR_WRITE))
53599+ task->is_writable = 1;
53600+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53601+ if (unlikely(obj->mode & GR_WRITE))
53602+ task->is_writable = 1;
53603+
53604+ gr_set_proc_res(task);
53605+
53606+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53607+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53608+#endif
53609+ } else {
53610+ return 1;
53611+ }
53612+
53613+ return 0;
53614+}
53615+
53616+int
53617+gr_set_acls(const int type)
53618+{
53619+ struct task_struct *task, *task2;
53620+ struct acl_role_label *role = current->role;
53621+ __u16 acl_role_id = current->acl_role_id;
53622+ const struct cred *cred;
53623+ int ret;
53624+
53625+ rcu_read_lock();
53626+ read_lock(&tasklist_lock);
53627+ read_lock(&grsec_exec_file_lock);
53628+ do_each_thread(task2, task) {
53629+ /* check to see if we're called from the exit handler,
53630+ if so, only replace ACLs that have inherited the admin
53631+ ACL */
53632+
53633+ if (type && (task->role != role ||
53634+ task->acl_role_id != acl_role_id))
53635+ continue;
53636+
53637+ task->acl_role_id = 0;
53638+ task->acl_sp_role = 0;
53639+
53640+ if (task->exec_file) {
53641+ cred = __task_cred(task);
53642+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
53643+ ret = gr_apply_subject_to_task(task);
53644+ if (ret) {
53645+ read_unlock(&grsec_exec_file_lock);
53646+ read_unlock(&tasklist_lock);
53647+ rcu_read_unlock();
53648+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
53649+ return ret;
53650+ }
53651+ } else {
53652+ // it's a kernel process
53653+ task->role = kernel_role;
53654+ task->acl = kernel_role->root_label;
53655+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
53656+ task->acl->mode &= ~GR_PROCFIND;
53657+#endif
53658+ }
53659+ } while_each_thread(task2, task);
53660+ read_unlock(&grsec_exec_file_lock);
53661+ read_unlock(&tasklist_lock);
53662+ rcu_read_unlock();
53663+
53664+ return 0;
53665+}
53666+
53667+void
53668+gr_learn_resource(const struct task_struct *task,
53669+ const int res, const unsigned long wanted, const int gt)
53670+{
53671+ struct acl_subject_label *acl;
53672+ const struct cred *cred;
53673+
53674+ if (unlikely((gr_status & GR_READY) &&
53675+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
53676+ goto skip_reslog;
53677+
53678+#ifdef CONFIG_GRKERNSEC_RESLOG
53679+ gr_log_resource(task, res, wanted, gt);
53680+#endif
53681+ skip_reslog:
53682+
53683+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
53684+ return;
53685+
53686+ acl = task->acl;
53687+
53688+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
53689+ !(acl->resmask & (1 << (unsigned short) res))))
53690+ return;
53691+
53692+ if (wanted >= acl->res[res].rlim_cur) {
53693+ unsigned long res_add;
53694+
53695+ res_add = wanted;
53696+ switch (res) {
53697+ case RLIMIT_CPU:
53698+ res_add += GR_RLIM_CPU_BUMP;
53699+ break;
53700+ case RLIMIT_FSIZE:
53701+ res_add += GR_RLIM_FSIZE_BUMP;
53702+ break;
53703+ case RLIMIT_DATA:
53704+ res_add += GR_RLIM_DATA_BUMP;
53705+ break;
53706+ case RLIMIT_STACK:
53707+ res_add += GR_RLIM_STACK_BUMP;
53708+ break;
53709+ case RLIMIT_CORE:
53710+ res_add += GR_RLIM_CORE_BUMP;
53711+ break;
53712+ case RLIMIT_RSS:
53713+ res_add += GR_RLIM_RSS_BUMP;
53714+ break;
53715+ case RLIMIT_NPROC:
53716+ res_add += GR_RLIM_NPROC_BUMP;
53717+ break;
53718+ case RLIMIT_NOFILE:
53719+ res_add += GR_RLIM_NOFILE_BUMP;
53720+ break;
53721+ case RLIMIT_MEMLOCK:
53722+ res_add += GR_RLIM_MEMLOCK_BUMP;
53723+ break;
53724+ case RLIMIT_AS:
53725+ res_add += GR_RLIM_AS_BUMP;
53726+ break;
53727+ case RLIMIT_LOCKS:
53728+ res_add += GR_RLIM_LOCKS_BUMP;
53729+ break;
53730+ case RLIMIT_SIGPENDING:
53731+ res_add += GR_RLIM_SIGPENDING_BUMP;
53732+ break;
53733+ case RLIMIT_MSGQUEUE:
53734+ res_add += GR_RLIM_MSGQUEUE_BUMP;
53735+ break;
53736+ case RLIMIT_NICE:
53737+ res_add += GR_RLIM_NICE_BUMP;
53738+ break;
53739+ case RLIMIT_RTPRIO:
53740+ res_add += GR_RLIM_RTPRIO_BUMP;
53741+ break;
53742+ case RLIMIT_RTTIME:
53743+ res_add += GR_RLIM_RTTIME_BUMP;
53744+ break;
53745+ }
53746+
53747+ acl->res[res].rlim_cur = res_add;
53748+
53749+ if (wanted > acl->res[res].rlim_max)
53750+ acl->res[res].rlim_max = res_add;
53751+
53752+ /* only log the subject filename, since resource logging is supported for
53753+ single-subject learning only */
53754+ rcu_read_lock();
53755+ cred = __task_cred(task);
53756+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
53757+ task->role->roletype, cred->uid, cred->gid, acl->filename,
53758+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
53759+ "", (unsigned long) res, &task->signal->saved_ip);
53760+ rcu_read_unlock();
53761+ }
53762+
53763+ return;
53764+}
53765+
53766+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
53767+void
53768+pax_set_initial_flags(struct linux_binprm *bprm)
53769+{
53770+ struct task_struct *task = current;
53771+ struct acl_subject_label *proc;
53772+ unsigned long flags;
53773+
53774+ if (unlikely(!(gr_status & GR_READY)))
53775+ return;
53776+
53777+ flags = pax_get_flags(task);
53778+
53779+ proc = task->acl;
53780+
53781+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
53782+ flags &= ~MF_PAX_PAGEEXEC;
53783+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
53784+ flags &= ~MF_PAX_SEGMEXEC;
53785+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
53786+ flags &= ~MF_PAX_RANDMMAP;
53787+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
53788+ flags &= ~MF_PAX_EMUTRAMP;
53789+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
53790+ flags &= ~MF_PAX_MPROTECT;
53791+
53792+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
53793+ flags |= MF_PAX_PAGEEXEC;
53794+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
53795+ flags |= MF_PAX_SEGMEXEC;
53796+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
53797+ flags |= MF_PAX_RANDMMAP;
53798+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
53799+ flags |= MF_PAX_EMUTRAMP;
53800+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
53801+ flags |= MF_PAX_MPROTECT;
53802+
53803+ pax_set_flags(task, flags);
53804+
53805+ return;
53806+}
53807+#endif
53808+
53809+int
53810+gr_handle_proc_ptrace(struct task_struct *task)
53811+{
53812+ struct file *filp;
53813+ struct task_struct *tmp = task;
53814+ struct task_struct *curtemp = current;
53815+ __u32 retmode;
53816+
53817+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
53818+ if (unlikely(!(gr_status & GR_READY)))
53819+ return 0;
53820+#endif
53821+
53822+ read_lock(&tasklist_lock);
53823+ read_lock(&grsec_exec_file_lock);
53824+ filp = task->exec_file;
53825+
53826+ while (tmp->pid > 0) {
53827+ if (tmp == curtemp)
53828+ break;
53829+ tmp = tmp->real_parent;
53830+ }
53831+
53832+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
53833+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
53834+ read_unlock(&grsec_exec_file_lock);
53835+ read_unlock(&tasklist_lock);
53836+ return 1;
53837+ }
53838+
53839+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53840+ if (!(gr_status & GR_READY)) {
53841+ read_unlock(&grsec_exec_file_lock);
53842+ read_unlock(&tasklist_lock);
53843+ return 0;
53844+ }
53845+#endif
53846+
53847+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
53848+ read_unlock(&grsec_exec_file_lock);
53849+ read_unlock(&tasklist_lock);
53850+
53851+ if (retmode & GR_NOPTRACE)
53852+ return 1;
53853+
53854+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
53855+ && (current->acl != task->acl || (current->acl != current->role->root_label
53856+ && current->pid != task->pid)))
53857+ return 1;
53858+
53859+ return 0;
53860+}
53861+
53862+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
53863+{
53864+ if (unlikely(!(gr_status & GR_READY)))
53865+ return;
53866+
53867+ if (!(current->role->roletype & GR_ROLE_GOD))
53868+ return;
53869+
53870+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
53871+ p->role->rolename, gr_task_roletype_to_char(p),
53872+ p->acl->filename);
53873+}
53874+
53875+int
53876+gr_handle_ptrace(struct task_struct *task, const long request)
53877+{
53878+ struct task_struct *tmp = task;
53879+ struct task_struct *curtemp = current;
53880+ __u32 retmode;
53881+
53882+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
53883+ if (unlikely(!(gr_status & GR_READY)))
53884+ return 0;
53885+#endif
53886+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
53887+ read_lock(&tasklist_lock);
53888+ while (tmp->pid > 0) {
53889+ if (tmp == curtemp)
53890+ break;
53891+ tmp = tmp->real_parent;
53892+ }
53893+
53894+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
53895+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
53896+ read_unlock(&tasklist_lock);
53897+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53898+ return 1;
53899+ }
53900+ read_unlock(&tasklist_lock);
53901+ }
53902+
53903+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53904+ if (!(gr_status & GR_READY))
53905+ return 0;
53906+#endif
53907+
53908+ read_lock(&grsec_exec_file_lock);
53909+ if (unlikely(!task->exec_file)) {
53910+ read_unlock(&grsec_exec_file_lock);
53911+ return 0;
53912+ }
53913+
53914+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
53915+ read_unlock(&grsec_exec_file_lock);
53916+
53917+ if (retmode & GR_NOPTRACE) {
53918+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53919+ return 1;
53920+ }
53921+
53922+ if (retmode & GR_PTRACERD) {
53923+ switch (request) {
53924+ case PTRACE_SEIZE:
53925+ case PTRACE_POKETEXT:
53926+ case PTRACE_POKEDATA:
53927+ case PTRACE_POKEUSR:
53928+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
53929+ case PTRACE_SETREGS:
53930+ case PTRACE_SETFPREGS:
53931+#endif
53932+#ifdef CONFIG_X86
53933+ case PTRACE_SETFPXREGS:
53934+#endif
53935+#ifdef CONFIG_ALTIVEC
53936+ case PTRACE_SETVRREGS:
53937+#endif
53938+ return 1;
53939+ default:
53940+ return 0;
53941+ }
53942+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
53943+ !(current->role->roletype & GR_ROLE_GOD) &&
53944+ (current->acl != task->acl)) {
53945+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53946+ return 1;
53947+ }
53948+
53949+ return 0;
53950+}
53951+
53952+static int is_writable_mmap(const struct file *filp)
53953+{
53954+ struct task_struct *task = current;
53955+ struct acl_object_label *obj, *obj2;
53956+
53957+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
53958+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
53959+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53960+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
53961+ task->role->root_label);
53962+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
53963+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
53964+ return 1;
53965+ }
53966+ }
53967+ return 0;
53968+}
53969+
53970+int
53971+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
53972+{
53973+ __u32 mode;
53974+
53975+ if (unlikely(!file || !(prot & PROT_EXEC)))
53976+ return 1;
53977+
53978+ if (is_writable_mmap(file))
53979+ return 0;
53980+
53981+ mode =
53982+ gr_search_file(file->f_path.dentry,
53983+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
53984+ file->f_path.mnt);
53985+
53986+ if (!gr_tpe_allow(file))
53987+ return 0;
53988+
53989+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
53990+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53991+ return 0;
53992+ } else if (unlikely(!(mode & GR_EXEC))) {
53993+ return 0;
53994+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
53995+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53996+ return 1;
53997+ }
53998+
53999+ return 1;
54000+}
54001+
54002+int
54003+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54004+{
54005+ __u32 mode;
54006+
54007+ if (unlikely(!file || !(prot & PROT_EXEC)))
54008+ return 1;
54009+
54010+ if (is_writable_mmap(file))
54011+ return 0;
54012+
54013+ mode =
54014+ gr_search_file(file->f_path.dentry,
54015+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54016+ file->f_path.mnt);
54017+
54018+ if (!gr_tpe_allow(file))
54019+ return 0;
54020+
54021+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54022+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54023+ return 0;
54024+ } else if (unlikely(!(mode & GR_EXEC))) {
54025+ return 0;
54026+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54027+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54028+ return 1;
54029+ }
54030+
54031+ return 1;
54032+}
54033+
54034+void
54035+gr_acl_handle_psacct(struct task_struct *task, const long code)
54036+{
54037+ unsigned long runtime;
54038+ unsigned long cputime;
54039+ unsigned int wday, cday;
54040+ __u8 whr, chr;
54041+ __u8 wmin, cmin;
54042+ __u8 wsec, csec;
54043+ struct timespec timeval;
54044+
54045+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
54046+ !(task->acl->mode & GR_PROCACCT)))
54047+ return;
54048+
54049+ do_posix_clock_monotonic_gettime(&timeval);
54050+ runtime = timeval.tv_sec - task->start_time.tv_sec;
54051+ wday = runtime / (3600 * 24);
54052+ runtime -= wday * (3600 * 24);
54053+ whr = runtime / 3600;
54054+ runtime -= whr * 3600;
54055+ wmin = runtime / 60;
54056+ runtime -= wmin * 60;
54057+ wsec = runtime;
54058+
54059+ cputime = (task->utime + task->stime) / HZ;
54060+ cday = cputime / (3600 * 24);
54061+ cputime -= cday * (3600 * 24);
54062+ chr = cputime / 3600;
54063+ cputime -= chr * 3600;
54064+ cmin = cputime / 60;
54065+ cputime -= cmin * 60;
54066+ csec = cputime;
54067+
54068+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
54069+
54070+ return;
54071+}
54072+
54073+void gr_set_kernel_label(struct task_struct *task)
54074+{
54075+ if (gr_status & GR_READY) {
54076+ task->role = kernel_role;
54077+ task->acl = kernel_role->root_label;
54078+ }
54079+ return;
54080+}
54081+
54082+#ifdef CONFIG_TASKSTATS
54083+int gr_is_taskstats_denied(int pid)
54084+{
54085+ struct task_struct *task;
54086+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54087+ const struct cred *cred;
54088+#endif
54089+ int ret = 0;
54090+
54091+ /* restrict taskstats viewing to un-chrooted root users
54092+ who have the 'view' subject flag if the RBAC system is enabled
54093+ */
54094+
54095+ rcu_read_lock();
54096+ read_lock(&tasklist_lock);
54097+ task = find_task_by_vpid(pid);
54098+ if (task) {
54099+#ifdef CONFIG_GRKERNSEC_CHROOT
54100+ if (proc_is_chrooted(task))
54101+ ret = -EACCES;
54102+#endif
54103+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54104+ cred = __task_cred(task);
54105+#ifdef CONFIG_GRKERNSEC_PROC_USER
54106+ if (cred->uid != 0)
54107+ ret = -EACCES;
54108+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54109+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
54110+ ret = -EACCES;
54111+#endif
54112+#endif
54113+ if (gr_status & GR_READY) {
54114+ if (!(task->acl->mode & GR_VIEW))
54115+ ret = -EACCES;
54116+ }
54117+ } else
54118+ ret = -ENOENT;
54119+
54120+ read_unlock(&tasklist_lock);
54121+ rcu_read_unlock();
54122+
54123+ return ret;
54124+}
54125+#endif
54126+
54127+/* AUXV entries are filled via a descendant of search_binary_handler
54128+ after we've already applied the subject for the target
54129+*/
54130+int gr_acl_enable_at_secure(void)
54131+{
54132+ if (unlikely(!(gr_status & GR_READY)))
54133+ return 0;
54134+
54135+ if (current->acl->mode & GR_ATSECURE)
54136+ return 1;
54137+
54138+ return 0;
54139+}
54140+
54141+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
54142+{
54143+ struct task_struct *task = current;
54144+ struct dentry *dentry = file->f_path.dentry;
54145+ struct vfsmount *mnt = file->f_path.mnt;
54146+ struct acl_object_label *obj, *tmp;
54147+ struct acl_subject_label *subj;
54148+ unsigned int bufsize;
54149+ int is_not_root;
54150+ char *path;
54151+ dev_t dev = __get_dev(dentry);
54152+
54153+ if (unlikely(!(gr_status & GR_READY)))
54154+ return 1;
54155+
54156+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
54157+ return 1;
54158+
54159+ /* ignore Eric Biederman */
54160+ if (IS_PRIVATE(dentry->d_inode))
54161+ return 1;
54162+
54163+ subj = task->acl;
54164+ do {
54165+ obj = lookup_acl_obj_label(ino, dev, subj);
54166+ if (obj != NULL)
54167+ return (obj->mode & GR_FIND) ? 1 : 0;
54168+ } while ((subj = subj->parent_subject));
54169+
54170+ /* this is purely an optimization since we're looking for an object
54171+ for the directory we're doing a readdir on
54172+ if it's possible for any globbed object to match the entry we're
54173+ filling into the directory, then the object we find here will be
54174+ an anchor point with attached globbed objects
54175+ */
54176+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
54177+ if (obj->globbed == NULL)
54178+ return (obj->mode & GR_FIND) ? 1 : 0;
54179+
54180+ is_not_root = ((obj->filename[0] == '/') &&
54181+ (obj->filename[1] == '\0')) ? 0 : 1;
54182+ bufsize = PAGE_SIZE - namelen - is_not_root;
54183+
54184+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
54185+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
54186+ return 1;
54187+
54188+ preempt_disable();
54189+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
54190+ bufsize);
54191+
54192+ bufsize = strlen(path);
54193+
54194+ /* if base is "/", don't append an additional slash */
54195+ if (is_not_root)
54196+ *(path + bufsize) = '/';
54197+ memcpy(path + bufsize + is_not_root, name, namelen);
54198+ *(path + bufsize + namelen + is_not_root) = '\0';
54199+
54200+ tmp = obj->globbed;
54201+ while (tmp) {
54202+ if (!glob_match(tmp->filename, path)) {
54203+ preempt_enable();
54204+ return (tmp->mode & GR_FIND) ? 1 : 0;
54205+ }
54206+ tmp = tmp->next;
54207+ }
54208+ preempt_enable();
54209+ return (obj->mode & GR_FIND) ? 1 : 0;
54210+}
54211+
54212+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
54213+EXPORT_SYMBOL(gr_acl_is_enabled);
54214+#endif
54215+EXPORT_SYMBOL(gr_learn_resource);
54216+EXPORT_SYMBOL(gr_set_kernel_label);
54217+#ifdef CONFIG_SECURITY
54218+EXPORT_SYMBOL(gr_check_user_change);
54219+EXPORT_SYMBOL(gr_check_group_change);
54220+#endif
54221+
54222diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
54223new file mode 100644
54224index 0000000..34fefda
54225--- /dev/null
54226+++ b/grsecurity/gracl_alloc.c
54227@@ -0,0 +1,105 @@
54228+#include <linux/kernel.h>
54229+#include <linux/mm.h>
54230+#include <linux/slab.h>
54231+#include <linux/vmalloc.h>
54232+#include <linux/gracl.h>
54233+#include <linux/grsecurity.h>
54234+
54235+static unsigned long alloc_stack_next = 1;
54236+static unsigned long alloc_stack_size = 1;
54237+static void **alloc_stack;
54238+
54239+static __inline__ int
54240+alloc_pop(void)
54241+{
54242+ if (alloc_stack_next == 1)
54243+ return 0;
54244+
54245+ kfree(alloc_stack[alloc_stack_next - 2]);
54246+
54247+ alloc_stack_next--;
54248+
54249+ return 1;
54250+}
54251+
54252+static __inline__ int
54253+alloc_push(void *buf)
54254+{
54255+ if (alloc_stack_next >= alloc_stack_size)
54256+ return 1;
54257+
54258+ alloc_stack[alloc_stack_next - 1] = buf;
54259+
54260+ alloc_stack_next++;
54261+
54262+ return 0;
54263+}
54264+
54265+void *
54266+acl_alloc(unsigned long len)
54267+{
54268+ void *ret = NULL;
54269+
54270+ if (!len || len > PAGE_SIZE)
54271+ goto out;
54272+
54273+ ret = kmalloc(len, GFP_KERNEL);
54274+
54275+ if (ret) {
54276+ if (alloc_push(ret)) {
54277+ kfree(ret);
54278+ ret = NULL;
54279+ }
54280+ }
54281+
54282+out:
54283+ return ret;
54284+}
54285+
54286+void *
54287+acl_alloc_num(unsigned long num, unsigned long len)
54288+{
54289+ if (!len || (num > (PAGE_SIZE / len)))
54290+ return NULL;
54291+
54292+ return acl_alloc(num * len);
54293+}
54294+
54295+void
54296+acl_free_all(void)
54297+{
54298+ if (gr_acl_is_enabled() || !alloc_stack)
54299+ return;
54300+
54301+ while (alloc_pop()) ;
54302+
54303+ if (alloc_stack) {
54304+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
54305+ kfree(alloc_stack);
54306+ else
54307+ vfree(alloc_stack);
54308+ }
54309+
54310+ alloc_stack = NULL;
54311+ alloc_stack_size = 1;
54312+ alloc_stack_next = 1;
54313+
54314+ return;
54315+}
54316+
54317+int
54318+acl_alloc_stack_init(unsigned long size)
54319+{
54320+ if ((size * sizeof (void *)) <= PAGE_SIZE)
54321+ alloc_stack =
54322+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
54323+ else
54324+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
54325+
54326+ alloc_stack_size = size;
54327+
54328+ if (!alloc_stack)
54329+ return 0;
54330+ else
54331+ return 1;
54332+}
54333diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
54334new file mode 100644
54335index 0000000..6d21049
54336--- /dev/null
54337+++ b/grsecurity/gracl_cap.c
54338@@ -0,0 +1,110 @@
54339+#include <linux/kernel.h>
54340+#include <linux/module.h>
54341+#include <linux/sched.h>
54342+#include <linux/gracl.h>
54343+#include <linux/grsecurity.h>
54344+#include <linux/grinternal.h>
54345+
54346+extern const char *captab_log[];
54347+extern int captab_log_entries;
54348+
54349+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
54350+{
54351+ struct acl_subject_label *curracl;
54352+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54353+ kernel_cap_t cap_audit = __cap_empty_set;
54354+
54355+ if (!gr_acl_is_enabled())
54356+ return 1;
54357+
54358+ curracl = task->acl;
54359+
54360+ cap_drop = curracl->cap_lower;
54361+ cap_mask = curracl->cap_mask;
54362+ cap_audit = curracl->cap_invert_audit;
54363+
54364+ while ((curracl = curracl->parent_subject)) {
54365+ /* if the cap isn't specified in the current computed mask but is specified in the
54366+ current level subject, and is lowered in the current level subject, then add
54367+ it to the set of dropped capabilities
54368+ otherwise, add the current level subject's mask to the current computed mask
54369+ */
54370+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54371+ cap_raise(cap_mask, cap);
54372+ if (cap_raised(curracl->cap_lower, cap))
54373+ cap_raise(cap_drop, cap);
54374+ if (cap_raised(curracl->cap_invert_audit, cap))
54375+ cap_raise(cap_audit, cap);
54376+ }
54377+ }
54378+
54379+ if (!cap_raised(cap_drop, cap)) {
54380+ if (cap_raised(cap_audit, cap))
54381+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
54382+ return 1;
54383+ }
54384+
54385+ curracl = task->acl;
54386+
54387+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
54388+ && cap_raised(cred->cap_effective, cap)) {
54389+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54390+ task->role->roletype, cred->uid,
54391+ cred->gid, task->exec_file ?
54392+ gr_to_filename(task->exec_file->f_path.dentry,
54393+ task->exec_file->f_path.mnt) : curracl->filename,
54394+ curracl->filename, 0UL,
54395+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
54396+ return 1;
54397+ }
54398+
54399+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
54400+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
54401+
54402+ return 0;
54403+}
54404+
54405+int
54406+gr_acl_is_capable(const int cap)
54407+{
54408+ return gr_task_acl_is_capable(current, current_cred(), cap);
54409+}
54410+
54411+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
54412+{
54413+ struct acl_subject_label *curracl;
54414+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54415+
54416+ if (!gr_acl_is_enabled())
54417+ return 1;
54418+
54419+ curracl = task->acl;
54420+
54421+ cap_drop = curracl->cap_lower;
54422+ cap_mask = curracl->cap_mask;
54423+
54424+ while ((curracl = curracl->parent_subject)) {
54425+ /* if the cap isn't specified in the current computed mask but is specified in the
54426+ current level subject, and is lowered in the current level subject, then add
54427+ it to the set of dropped capabilities
54428+ otherwise, add the current level subject's mask to the current computed mask
54429+ */
54430+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54431+ cap_raise(cap_mask, cap);
54432+ if (cap_raised(curracl->cap_lower, cap))
54433+ cap_raise(cap_drop, cap);
54434+ }
54435+ }
54436+
54437+ if (!cap_raised(cap_drop, cap))
54438+ return 1;
54439+
54440+ return 0;
54441+}
54442+
54443+int
54444+gr_acl_is_capable_nolog(const int cap)
54445+{
54446+ return gr_task_acl_is_capable_nolog(current, cap);
54447+}
54448+
54449diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
54450new file mode 100644
54451index 0000000..88d0e87
54452--- /dev/null
54453+++ b/grsecurity/gracl_fs.c
54454@@ -0,0 +1,435 @@
54455+#include <linux/kernel.h>
54456+#include <linux/sched.h>
54457+#include <linux/types.h>
54458+#include <linux/fs.h>
54459+#include <linux/file.h>
54460+#include <linux/stat.h>
54461+#include <linux/grsecurity.h>
54462+#include <linux/grinternal.h>
54463+#include <linux/gracl.h>
54464+
54465+umode_t
54466+gr_acl_umask(void)
54467+{
54468+ if (unlikely(!gr_acl_is_enabled()))
54469+ return 0;
54470+
54471+ return current->role->umask;
54472+}
54473+
54474+__u32
54475+gr_acl_handle_hidden_file(const struct dentry * dentry,
54476+ const struct vfsmount * mnt)
54477+{
54478+ __u32 mode;
54479+
54480+ if (unlikely(!dentry->d_inode))
54481+ return GR_FIND;
54482+
54483+ mode =
54484+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
54485+
54486+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
54487+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54488+ return mode;
54489+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
54490+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54491+ return 0;
54492+ } else if (unlikely(!(mode & GR_FIND)))
54493+ return 0;
54494+
54495+ return GR_FIND;
54496+}
54497+
54498+__u32
54499+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54500+ int acc_mode)
54501+{
54502+ __u32 reqmode = GR_FIND;
54503+ __u32 mode;
54504+
54505+ if (unlikely(!dentry->d_inode))
54506+ return reqmode;
54507+
54508+ if (acc_mode & MAY_APPEND)
54509+ reqmode |= GR_APPEND;
54510+ else if (acc_mode & MAY_WRITE)
54511+ reqmode |= GR_WRITE;
54512+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
54513+ reqmode |= GR_READ;
54514+
54515+ mode =
54516+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54517+ mnt);
54518+
54519+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54520+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54521+ reqmode & GR_READ ? " reading" : "",
54522+ reqmode & GR_WRITE ? " writing" : reqmode &
54523+ GR_APPEND ? " appending" : "");
54524+ return reqmode;
54525+ } else
54526+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54527+ {
54528+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54529+ reqmode & GR_READ ? " reading" : "",
54530+ reqmode & GR_WRITE ? " writing" : reqmode &
54531+ GR_APPEND ? " appending" : "");
54532+ return 0;
54533+ } else if (unlikely((mode & reqmode) != reqmode))
54534+ return 0;
54535+
54536+ return reqmode;
54537+}
54538+
54539+__u32
54540+gr_acl_handle_creat(const struct dentry * dentry,
54541+ const struct dentry * p_dentry,
54542+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
54543+ const int imode)
54544+{
54545+ __u32 reqmode = GR_WRITE | GR_CREATE;
54546+ __u32 mode;
54547+
54548+ if (acc_mode & MAY_APPEND)
54549+ reqmode |= GR_APPEND;
54550+ // if a directory was required or the directory already exists, then
54551+ // don't count this open as a read
54552+ if ((acc_mode & MAY_READ) &&
54553+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
54554+ reqmode |= GR_READ;
54555+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
54556+ reqmode |= GR_SETID;
54557+
54558+ mode =
54559+ gr_check_create(dentry, p_dentry, p_mnt,
54560+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54561+
54562+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54563+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54564+ reqmode & GR_READ ? " reading" : "",
54565+ reqmode & GR_WRITE ? " writing" : reqmode &
54566+ GR_APPEND ? " appending" : "");
54567+ return reqmode;
54568+ } else
54569+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54570+ {
54571+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54572+ reqmode & GR_READ ? " reading" : "",
54573+ reqmode & GR_WRITE ? " writing" : reqmode &
54574+ GR_APPEND ? " appending" : "");
54575+ return 0;
54576+ } else if (unlikely((mode & reqmode) != reqmode))
54577+ return 0;
54578+
54579+ return reqmode;
54580+}
54581+
54582+__u32
54583+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
54584+ const int fmode)
54585+{
54586+ __u32 mode, reqmode = GR_FIND;
54587+
54588+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
54589+ reqmode |= GR_EXEC;
54590+ if (fmode & S_IWOTH)
54591+ reqmode |= GR_WRITE;
54592+ if (fmode & S_IROTH)
54593+ reqmode |= GR_READ;
54594+
54595+ mode =
54596+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54597+ mnt);
54598+
54599+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54600+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
54601+ reqmode & GR_READ ? " reading" : "",
54602+ reqmode & GR_WRITE ? " writing" : "",
54603+ reqmode & GR_EXEC ? " executing" : "");
54604+ return reqmode;
54605+ } else
54606+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54607+ {
54608+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
54609+ reqmode & GR_READ ? " reading" : "",
54610+ reqmode & GR_WRITE ? " writing" : "",
54611+ reqmode & GR_EXEC ? " executing" : "");
54612+ return 0;
54613+ } else if (unlikely((mode & reqmode) != reqmode))
54614+ return 0;
54615+
54616+ return reqmode;
54617+}
54618+
54619+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
54620+{
54621+ __u32 mode;
54622+
54623+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
54624+
54625+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
54626+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
54627+ return mode;
54628+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
54629+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
54630+ return 0;
54631+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
54632+ return 0;
54633+
54634+ return (reqmode);
54635+}
54636+
54637+__u32
54638+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54639+{
54640+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
54641+}
54642+
54643+__u32
54644+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
54645+{
54646+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
54647+}
54648+
54649+__u32
54650+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
54651+{
54652+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
54653+}
54654+
54655+__u32
54656+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
54657+{
54658+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
54659+}
54660+
54661+__u32
54662+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
54663+ umode_t *modeptr)
54664+{
54665+ umode_t mode;
54666+
54667+ *modeptr &= ~gr_acl_umask();
54668+ mode = *modeptr;
54669+
54670+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
54671+ return 1;
54672+
54673+ if (unlikely(mode & (S_ISUID | S_ISGID))) {
54674+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
54675+ GR_CHMOD_ACL_MSG);
54676+ } else {
54677+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
54678+ }
54679+}
54680+
54681+__u32
54682+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
54683+{
54684+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
54685+}
54686+
54687+__u32
54688+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
54689+{
54690+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
54691+}
54692+
54693+__u32
54694+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
54695+{
54696+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
54697+}
54698+
54699+__u32
54700+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
54701+{
54702+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
54703+ GR_UNIXCONNECT_ACL_MSG);
54704+}
54705+
54706+/* hardlinks require at minimum create and link permission,
54707+ any additional privilege required is based on the
54708+ privilege of the file being linked to
54709+*/
54710+__u32
54711+gr_acl_handle_link(const struct dentry * new_dentry,
54712+ const struct dentry * parent_dentry,
54713+ const struct vfsmount * parent_mnt,
54714+ const struct dentry * old_dentry,
54715+ const struct vfsmount * old_mnt, const char *to)
54716+{
54717+ __u32 mode;
54718+ __u32 needmode = GR_CREATE | GR_LINK;
54719+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
54720+
54721+ mode =
54722+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
54723+ old_mnt);
54724+
54725+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
54726+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
54727+ return mode;
54728+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
54729+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
54730+ return 0;
54731+ } else if (unlikely((mode & needmode) != needmode))
54732+ return 0;
54733+
54734+ return 1;
54735+}
54736+
54737+__u32
54738+gr_acl_handle_symlink(const struct dentry * new_dentry,
54739+ const struct dentry * parent_dentry,
54740+ const struct vfsmount * parent_mnt, const char *from)
54741+{
54742+ __u32 needmode = GR_WRITE | GR_CREATE;
54743+ __u32 mode;
54744+
54745+ mode =
54746+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
54747+ GR_CREATE | GR_AUDIT_CREATE |
54748+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
54749+
54750+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
54751+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
54752+ return mode;
54753+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
54754+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
54755+ return 0;
54756+ } else if (unlikely((mode & needmode) != needmode))
54757+ return 0;
54758+
54759+ return (GR_WRITE | GR_CREATE);
54760+}
54761+
54762+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
54763+{
54764+ __u32 mode;
54765+
54766+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54767+
54768+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
54769+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
54770+ return mode;
54771+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
54772+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
54773+ return 0;
54774+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
54775+ return 0;
54776+
54777+ return (reqmode);
54778+}
54779+
54780+__u32
54781+gr_acl_handle_mknod(const struct dentry * new_dentry,
54782+ const struct dentry * parent_dentry,
54783+ const struct vfsmount * parent_mnt,
54784+ const int mode)
54785+{
54786+ __u32 reqmode = GR_WRITE | GR_CREATE;
54787+ if (unlikely(mode & (S_ISUID | S_ISGID)))
54788+ reqmode |= GR_SETID;
54789+
54790+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
54791+ reqmode, GR_MKNOD_ACL_MSG);
54792+}
54793+
54794+__u32
54795+gr_acl_handle_mkdir(const struct dentry *new_dentry,
54796+ const struct dentry *parent_dentry,
54797+ const struct vfsmount *parent_mnt)
54798+{
54799+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
54800+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
54801+}
54802+
54803+#define RENAME_CHECK_SUCCESS(old, new) \
54804+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
54805+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
54806+
54807+int
54808+gr_acl_handle_rename(struct dentry *new_dentry,
54809+ struct dentry *parent_dentry,
54810+ const struct vfsmount *parent_mnt,
54811+ struct dentry *old_dentry,
54812+ struct inode *old_parent_inode,
54813+ struct vfsmount *old_mnt, const char *newname)
54814+{
54815+ __u32 comp1, comp2;
54816+ int error = 0;
54817+
54818+ if (unlikely(!gr_acl_is_enabled()))
54819+ return 0;
54820+
54821+ if (!new_dentry->d_inode) {
54822+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
54823+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
54824+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
54825+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
54826+ GR_DELETE | GR_AUDIT_DELETE |
54827+ GR_AUDIT_READ | GR_AUDIT_WRITE |
54828+ GR_SUPPRESS, old_mnt);
54829+ } else {
54830+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
54831+ GR_CREATE | GR_DELETE |
54832+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
54833+ GR_AUDIT_READ | GR_AUDIT_WRITE |
54834+ GR_SUPPRESS, parent_mnt);
54835+ comp2 =
54836+ gr_search_file(old_dentry,
54837+ GR_READ | GR_WRITE | GR_AUDIT_READ |
54838+ GR_DELETE | GR_AUDIT_DELETE |
54839+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
54840+ }
54841+
54842+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
54843+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
54844+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
54845+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
54846+ && !(comp2 & GR_SUPPRESS)) {
54847+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
54848+ error = -EACCES;
54849+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
54850+ error = -EACCES;
54851+
54852+ return error;
54853+}
54854+
54855+void
54856+gr_acl_handle_exit(void)
54857+{
54858+ u16 id;
54859+ char *rolename;
54860+ struct file *exec_file;
54861+
54862+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
54863+ !(current->role->roletype & GR_ROLE_PERSIST))) {
54864+ id = current->acl_role_id;
54865+ rolename = current->role->rolename;
54866+ gr_set_acls(1);
54867+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
54868+ }
54869+
54870+ write_lock(&grsec_exec_file_lock);
54871+ exec_file = current->exec_file;
54872+ current->exec_file = NULL;
54873+ write_unlock(&grsec_exec_file_lock);
54874+
54875+ if (exec_file)
54876+ fput(exec_file);
54877+}
54878+
54879+int
54880+gr_acl_handle_procpidmem(const struct task_struct *task)
54881+{
54882+ if (unlikely(!gr_acl_is_enabled()))
54883+ return 0;
54884+
54885+ if (task != current && task->acl->mode & GR_PROTPROCFD)
54886+ return -EACCES;
54887+
54888+ return 0;
54889+}
54890diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
54891new file mode 100644
54892index 0000000..58800a7
54893--- /dev/null
54894+++ b/grsecurity/gracl_ip.c
54895@@ -0,0 +1,384 @@
54896+#include <linux/kernel.h>
54897+#include <asm/uaccess.h>
54898+#include <asm/errno.h>
54899+#include <net/sock.h>
54900+#include <linux/file.h>
54901+#include <linux/fs.h>
54902+#include <linux/net.h>
54903+#include <linux/in.h>
54904+#include <linux/skbuff.h>
54905+#include <linux/ip.h>
54906+#include <linux/udp.h>
54907+#include <linux/types.h>
54908+#include <linux/sched.h>
54909+#include <linux/netdevice.h>
54910+#include <linux/inetdevice.h>
54911+#include <linux/gracl.h>
54912+#include <linux/grsecurity.h>
54913+#include <linux/grinternal.h>
54914+
54915+#define GR_BIND 0x01
54916+#define GR_CONNECT 0x02
54917+#define GR_INVERT 0x04
54918+#define GR_BINDOVERRIDE 0x08
54919+#define GR_CONNECTOVERRIDE 0x10
54920+#define GR_SOCK_FAMILY 0x20
54921+
54922+static const char * gr_protocols[IPPROTO_MAX] = {
54923+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
54924+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
54925+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
54926+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
54927+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
54928+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
54929+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
54930+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
54931+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
54932+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
54933+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
54934+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
54935+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
54936+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
54937+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
54938+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
54939+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
54940+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
54941+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
54942+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
54943+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
54944+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
54945+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
54946+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
54947+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
54948+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
54949+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
54950+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
54951+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
54952+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
54953+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
54954+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
54955+ };
54956+
54957+static const char * gr_socktypes[SOCK_MAX] = {
54958+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
54959+ "unknown:7", "unknown:8", "unknown:9", "packet"
54960+ };
54961+
54962+static const char * gr_sockfamilies[AF_MAX+1] = {
54963+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
54964+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
54965+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
54966+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
54967+ };
54968+
54969+const char *
54970+gr_proto_to_name(unsigned char proto)
54971+{
54972+ return gr_protocols[proto];
54973+}
54974+
54975+const char *
54976+gr_socktype_to_name(unsigned char type)
54977+{
54978+ return gr_socktypes[type];
54979+}
54980+
54981+const char *
54982+gr_sockfamily_to_name(unsigned char family)
54983+{
54984+ return gr_sockfamilies[family];
54985+}
54986+
54987+int
54988+gr_search_socket(const int domain, const int type, const int protocol)
54989+{
54990+ struct acl_subject_label *curr;
54991+ const struct cred *cred = current_cred();
54992+
54993+ if (unlikely(!gr_acl_is_enabled()))
54994+ goto exit;
54995+
54996+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
54997+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
54998+ goto exit; // let the kernel handle it
54999+
55000+ curr = current->acl;
55001+
55002+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
55003+ /* the family is allowed, if this is PF_INET allow it only if
55004+ the extra sock type/protocol checks pass */
55005+ if (domain == PF_INET)
55006+ goto inet_check;
55007+ goto exit;
55008+ } else {
55009+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55010+ __u32 fakeip = 0;
55011+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55012+ current->role->roletype, cred->uid,
55013+ cred->gid, current->exec_file ?
55014+ gr_to_filename(current->exec_file->f_path.dentry,
55015+ current->exec_file->f_path.mnt) :
55016+ curr->filename, curr->filename,
55017+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
55018+ &current->signal->saved_ip);
55019+ goto exit;
55020+ }
55021+ goto exit_fail;
55022+ }
55023+
55024+inet_check:
55025+ /* the rest of this checking is for IPv4 only */
55026+ if (!curr->ips)
55027+ goto exit;
55028+
55029+ if ((curr->ip_type & (1 << type)) &&
55030+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
55031+ goto exit;
55032+
55033+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55034+ /* we don't place acls on raw sockets , and sometimes
55035+ dgram/ip sockets are opened for ioctl and not
55036+ bind/connect, so we'll fake a bind learn log */
55037+ if (type == SOCK_RAW || type == SOCK_PACKET) {
55038+ __u32 fakeip = 0;
55039+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55040+ current->role->roletype, cred->uid,
55041+ cred->gid, current->exec_file ?
55042+ gr_to_filename(current->exec_file->f_path.dentry,
55043+ current->exec_file->f_path.mnt) :
55044+ curr->filename, curr->filename,
55045+ &fakeip, 0, type,
55046+ protocol, GR_CONNECT, &current->signal->saved_ip);
55047+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
55048+ __u32 fakeip = 0;
55049+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55050+ current->role->roletype, cred->uid,
55051+ cred->gid, current->exec_file ?
55052+ gr_to_filename(current->exec_file->f_path.dentry,
55053+ current->exec_file->f_path.mnt) :
55054+ curr->filename, curr->filename,
55055+ &fakeip, 0, type,
55056+ protocol, GR_BIND, &current->signal->saved_ip);
55057+ }
55058+ /* we'll log when they use connect or bind */
55059+ goto exit;
55060+ }
55061+
55062+exit_fail:
55063+ if (domain == PF_INET)
55064+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
55065+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
55066+ else
55067+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
55068+ gr_socktype_to_name(type), protocol);
55069+
55070+ return 0;
55071+exit:
55072+ return 1;
55073+}
55074+
55075+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
55076+{
55077+ if ((ip->mode & mode) &&
55078+ (ip_port >= ip->low) &&
55079+ (ip_port <= ip->high) &&
55080+ ((ntohl(ip_addr) & our_netmask) ==
55081+ (ntohl(our_addr) & our_netmask))
55082+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
55083+ && (ip->type & (1 << type))) {
55084+ if (ip->mode & GR_INVERT)
55085+ return 2; // specifically denied
55086+ else
55087+ return 1; // allowed
55088+ }
55089+
55090+ return 0; // not specifically allowed, may continue parsing
55091+}
55092+
55093+static int
55094+gr_search_connectbind(const int full_mode, struct sock *sk,
55095+ struct sockaddr_in *addr, const int type)
55096+{
55097+ char iface[IFNAMSIZ] = {0};
55098+ struct acl_subject_label *curr;
55099+ struct acl_ip_label *ip;
55100+ struct inet_sock *isk;
55101+ struct net_device *dev;
55102+ struct in_device *idev;
55103+ unsigned long i;
55104+ int ret;
55105+ int mode = full_mode & (GR_BIND | GR_CONNECT);
55106+ __u32 ip_addr = 0;
55107+ __u32 our_addr;
55108+ __u32 our_netmask;
55109+ char *p;
55110+ __u16 ip_port = 0;
55111+ const struct cred *cred = current_cred();
55112+
55113+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
55114+ return 0;
55115+
55116+ curr = current->acl;
55117+ isk = inet_sk(sk);
55118+
55119+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
55120+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
55121+ addr->sin_addr.s_addr = curr->inaddr_any_override;
55122+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
55123+ struct sockaddr_in saddr;
55124+ int err;
55125+
55126+ saddr.sin_family = AF_INET;
55127+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
55128+ saddr.sin_port = isk->inet_sport;
55129+
55130+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55131+ if (err)
55132+ return err;
55133+
55134+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55135+ if (err)
55136+ return err;
55137+ }
55138+
55139+ if (!curr->ips)
55140+ return 0;
55141+
55142+ ip_addr = addr->sin_addr.s_addr;
55143+ ip_port = ntohs(addr->sin_port);
55144+
55145+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55146+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55147+ current->role->roletype, cred->uid,
55148+ cred->gid, current->exec_file ?
55149+ gr_to_filename(current->exec_file->f_path.dentry,
55150+ current->exec_file->f_path.mnt) :
55151+ curr->filename, curr->filename,
55152+ &ip_addr, ip_port, type,
55153+ sk->sk_protocol, mode, &current->signal->saved_ip);
55154+ return 0;
55155+ }
55156+
55157+ for (i = 0; i < curr->ip_num; i++) {
55158+ ip = *(curr->ips + i);
55159+ if (ip->iface != NULL) {
55160+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
55161+ p = strchr(iface, ':');
55162+ if (p != NULL)
55163+ *p = '\0';
55164+ dev = dev_get_by_name(sock_net(sk), iface);
55165+ if (dev == NULL)
55166+ continue;
55167+ idev = in_dev_get(dev);
55168+ if (idev == NULL) {
55169+ dev_put(dev);
55170+ continue;
55171+ }
55172+ rcu_read_lock();
55173+ for_ifa(idev) {
55174+ if (!strcmp(ip->iface, ifa->ifa_label)) {
55175+ our_addr = ifa->ifa_address;
55176+ our_netmask = 0xffffffff;
55177+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55178+ if (ret == 1) {
55179+ rcu_read_unlock();
55180+ in_dev_put(idev);
55181+ dev_put(dev);
55182+ return 0;
55183+ } else if (ret == 2) {
55184+ rcu_read_unlock();
55185+ in_dev_put(idev);
55186+ dev_put(dev);
55187+ goto denied;
55188+ }
55189+ }
55190+ } endfor_ifa(idev);
55191+ rcu_read_unlock();
55192+ in_dev_put(idev);
55193+ dev_put(dev);
55194+ } else {
55195+ our_addr = ip->addr;
55196+ our_netmask = ip->netmask;
55197+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55198+ if (ret == 1)
55199+ return 0;
55200+ else if (ret == 2)
55201+ goto denied;
55202+ }
55203+ }
55204+
55205+denied:
55206+ if (mode == GR_BIND)
55207+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55208+ else if (mode == GR_CONNECT)
55209+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55210+
55211+ return -EACCES;
55212+}
55213+
55214+int
55215+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
55216+{
55217+ /* always allow disconnection of dgram sockets with connect */
55218+ if (addr->sin_family == AF_UNSPEC)
55219+ return 0;
55220+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
55221+}
55222+
55223+int
55224+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
55225+{
55226+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
55227+}
55228+
55229+int gr_search_listen(struct socket *sock)
55230+{
55231+ struct sock *sk = sock->sk;
55232+ struct sockaddr_in addr;
55233+
55234+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55235+ addr.sin_port = inet_sk(sk)->inet_sport;
55236+
55237+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55238+}
55239+
55240+int gr_search_accept(struct socket *sock)
55241+{
55242+ struct sock *sk = sock->sk;
55243+ struct sockaddr_in addr;
55244+
55245+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55246+ addr.sin_port = inet_sk(sk)->inet_sport;
55247+
55248+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55249+}
55250+
55251+int
55252+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
55253+{
55254+ if (addr)
55255+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
55256+ else {
55257+ struct sockaddr_in sin;
55258+ const struct inet_sock *inet = inet_sk(sk);
55259+
55260+ sin.sin_addr.s_addr = inet->inet_daddr;
55261+ sin.sin_port = inet->inet_dport;
55262+
55263+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55264+ }
55265+}
55266+
55267+int
55268+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
55269+{
55270+ struct sockaddr_in sin;
55271+
55272+ if (unlikely(skb->len < sizeof (struct udphdr)))
55273+ return 0; // skip this packet
55274+
55275+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
55276+ sin.sin_port = udp_hdr(skb)->source;
55277+
55278+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55279+}
55280diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
55281new file mode 100644
55282index 0000000..25f54ef
55283--- /dev/null
55284+++ b/grsecurity/gracl_learn.c
55285@@ -0,0 +1,207 @@
55286+#include <linux/kernel.h>
55287+#include <linux/mm.h>
55288+#include <linux/sched.h>
55289+#include <linux/poll.h>
55290+#include <linux/string.h>
55291+#include <linux/file.h>
55292+#include <linux/types.h>
55293+#include <linux/vmalloc.h>
55294+#include <linux/grinternal.h>
55295+
55296+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
55297+ size_t count, loff_t *ppos);
55298+extern int gr_acl_is_enabled(void);
55299+
55300+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
55301+static int gr_learn_attached;
55302+
55303+/* use a 512k buffer */
55304+#define LEARN_BUFFER_SIZE (512 * 1024)
55305+
55306+static DEFINE_SPINLOCK(gr_learn_lock);
55307+static DEFINE_MUTEX(gr_learn_user_mutex);
55308+
55309+/* we need to maintain two buffers, so that the kernel context of grlearn
55310+ uses a semaphore around the userspace copying, and the other kernel contexts
55311+ use a spinlock when copying into the buffer, since they cannot sleep
55312+*/
55313+static char *learn_buffer;
55314+static char *learn_buffer_user;
55315+static int learn_buffer_len;
55316+static int learn_buffer_user_len;
55317+
55318+static ssize_t
55319+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
55320+{
55321+ DECLARE_WAITQUEUE(wait, current);
55322+ ssize_t retval = 0;
55323+
55324+ add_wait_queue(&learn_wait, &wait);
55325+ set_current_state(TASK_INTERRUPTIBLE);
55326+ do {
55327+ mutex_lock(&gr_learn_user_mutex);
55328+ spin_lock(&gr_learn_lock);
55329+ if (learn_buffer_len)
55330+ break;
55331+ spin_unlock(&gr_learn_lock);
55332+ mutex_unlock(&gr_learn_user_mutex);
55333+ if (file->f_flags & O_NONBLOCK) {
55334+ retval = -EAGAIN;
55335+ goto out;
55336+ }
55337+ if (signal_pending(current)) {
55338+ retval = -ERESTARTSYS;
55339+ goto out;
55340+ }
55341+
55342+ schedule();
55343+ } while (1);
55344+
55345+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
55346+ learn_buffer_user_len = learn_buffer_len;
55347+ retval = learn_buffer_len;
55348+ learn_buffer_len = 0;
55349+
55350+ spin_unlock(&gr_learn_lock);
55351+
55352+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
55353+ retval = -EFAULT;
55354+
55355+ mutex_unlock(&gr_learn_user_mutex);
55356+out:
55357+ set_current_state(TASK_RUNNING);
55358+ remove_wait_queue(&learn_wait, &wait);
55359+ return retval;
55360+}
55361+
55362+static unsigned int
55363+poll_learn(struct file * file, poll_table * wait)
55364+{
55365+ poll_wait(file, &learn_wait, wait);
55366+
55367+ if (learn_buffer_len)
55368+ return (POLLIN | POLLRDNORM);
55369+
55370+ return 0;
55371+}
55372+
55373+void
55374+gr_clear_learn_entries(void)
55375+{
55376+ char *tmp;
55377+
55378+ mutex_lock(&gr_learn_user_mutex);
55379+ spin_lock(&gr_learn_lock);
55380+ tmp = learn_buffer;
55381+ learn_buffer = NULL;
55382+ spin_unlock(&gr_learn_lock);
55383+ if (tmp)
55384+ vfree(tmp);
55385+ if (learn_buffer_user != NULL) {
55386+ vfree(learn_buffer_user);
55387+ learn_buffer_user = NULL;
55388+ }
55389+ learn_buffer_len = 0;
55390+ mutex_unlock(&gr_learn_user_mutex);
55391+
55392+ return;
55393+}
55394+
55395+void
55396+gr_add_learn_entry(const char *fmt, ...)
55397+{
55398+ va_list args;
55399+ unsigned int len;
55400+
55401+ if (!gr_learn_attached)
55402+ return;
55403+
55404+ spin_lock(&gr_learn_lock);
55405+
55406+ /* leave a gap at the end so we know when it's "full" but don't have to
55407+ compute the exact length of the string we're trying to append
55408+ */
55409+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
55410+ spin_unlock(&gr_learn_lock);
55411+ wake_up_interruptible(&learn_wait);
55412+ return;
55413+ }
55414+ if (learn_buffer == NULL) {
55415+ spin_unlock(&gr_learn_lock);
55416+ return;
55417+ }
55418+
55419+ va_start(args, fmt);
55420+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
55421+ va_end(args);
55422+
55423+ learn_buffer_len += len + 1;
55424+
55425+ spin_unlock(&gr_learn_lock);
55426+ wake_up_interruptible(&learn_wait);
55427+
55428+ return;
55429+}
55430+
55431+static int
55432+open_learn(struct inode *inode, struct file *file)
55433+{
55434+ if (file->f_mode & FMODE_READ && gr_learn_attached)
55435+ return -EBUSY;
55436+ if (file->f_mode & FMODE_READ) {
55437+ int retval = 0;
55438+ mutex_lock(&gr_learn_user_mutex);
55439+ if (learn_buffer == NULL)
55440+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
55441+ if (learn_buffer_user == NULL)
55442+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
55443+ if (learn_buffer == NULL) {
55444+ retval = -ENOMEM;
55445+ goto out_error;
55446+ }
55447+ if (learn_buffer_user == NULL) {
55448+ retval = -ENOMEM;
55449+ goto out_error;
55450+ }
55451+ learn_buffer_len = 0;
55452+ learn_buffer_user_len = 0;
55453+ gr_learn_attached = 1;
55454+out_error:
55455+ mutex_unlock(&gr_learn_user_mutex);
55456+ return retval;
55457+ }
55458+ return 0;
55459+}
55460+
55461+static int
55462+close_learn(struct inode *inode, struct file *file)
55463+{
55464+ if (file->f_mode & FMODE_READ) {
55465+ char *tmp = NULL;
55466+ mutex_lock(&gr_learn_user_mutex);
55467+ spin_lock(&gr_learn_lock);
55468+ tmp = learn_buffer;
55469+ learn_buffer = NULL;
55470+ spin_unlock(&gr_learn_lock);
55471+ if (tmp)
55472+ vfree(tmp);
55473+ if (learn_buffer_user != NULL) {
55474+ vfree(learn_buffer_user);
55475+ learn_buffer_user = NULL;
55476+ }
55477+ learn_buffer_len = 0;
55478+ learn_buffer_user_len = 0;
55479+ gr_learn_attached = 0;
55480+ mutex_unlock(&gr_learn_user_mutex);
55481+ }
55482+
55483+ return 0;
55484+}
55485+
55486+const struct file_operations grsec_fops = {
55487+ .read = read_learn,
55488+ .write = write_grsec_handler,
55489+ .open = open_learn,
55490+ .release = close_learn,
55491+ .poll = poll_learn,
55492+};
55493diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
55494new file mode 100644
55495index 0000000..39645c9
55496--- /dev/null
55497+++ b/grsecurity/gracl_res.c
55498@@ -0,0 +1,68 @@
55499+#include <linux/kernel.h>
55500+#include <linux/sched.h>
55501+#include <linux/gracl.h>
55502+#include <linux/grinternal.h>
55503+
55504+static const char *restab_log[] = {
55505+ [RLIMIT_CPU] = "RLIMIT_CPU",
55506+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
55507+ [RLIMIT_DATA] = "RLIMIT_DATA",
55508+ [RLIMIT_STACK] = "RLIMIT_STACK",
55509+ [RLIMIT_CORE] = "RLIMIT_CORE",
55510+ [RLIMIT_RSS] = "RLIMIT_RSS",
55511+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
55512+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
55513+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
55514+ [RLIMIT_AS] = "RLIMIT_AS",
55515+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
55516+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
55517+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
55518+ [RLIMIT_NICE] = "RLIMIT_NICE",
55519+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
55520+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
55521+ [GR_CRASH_RES] = "RLIMIT_CRASH"
55522+};
55523+
55524+void
55525+gr_log_resource(const struct task_struct *task,
55526+ const int res, const unsigned long wanted, const int gt)
55527+{
55528+ const struct cred *cred;
55529+ unsigned long rlim;
55530+
55531+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
55532+ return;
55533+
55534+ // not yet supported resource
55535+ if (unlikely(!restab_log[res]))
55536+ return;
55537+
55538+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
55539+ rlim = task_rlimit_max(task, res);
55540+ else
55541+ rlim = task_rlimit(task, res);
55542+
55543+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
55544+ return;
55545+
55546+ rcu_read_lock();
55547+ cred = __task_cred(task);
55548+
55549+ if (res == RLIMIT_NPROC &&
55550+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
55551+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
55552+ goto out_rcu_unlock;
55553+ else if (res == RLIMIT_MEMLOCK &&
55554+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
55555+ goto out_rcu_unlock;
55556+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
55557+ goto out_rcu_unlock;
55558+ rcu_read_unlock();
55559+
55560+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
55561+
55562+ return;
55563+out_rcu_unlock:
55564+ rcu_read_unlock();
55565+ return;
55566+}
55567diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
55568new file mode 100644
55569index 0000000..5556be3
55570--- /dev/null
55571+++ b/grsecurity/gracl_segv.c
55572@@ -0,0 +1,299 @@
55573+#include <linux/kernel.h>
55574+#include <linux/mm.h>
55575+#include <asm/uaccess.h>
55576+#include <asm/errno.h>
55577+#include <asm/mman.h>
55578+#include <net/sock.h>
55579+#include <linux/file.h>
55580+#include <linux/fs.h>
55581+#include <linux/net.h>
55582+#include <linux/in.h>
55583+#include <linux/slab.h>
55584+#include <linux/types.h>
55585+#include <linux/sched.h>
55586+#include <linux/timer.h>
55587+#include <linux/gracl.h>
55588+#include <linux/grsecurity.h>
55589+#include <linux/grinternal.h>
55590+
55591+static struct crash_uid *uid_set;
55592+static unsigned short uid_used;
55593+static DEFINE_SPINLOCK(gr_uid_lock);
55594+extern rwlock_t gr_inode_lock;
55595+extern struct acl_subject_label *
55596+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
55597+ struct acl_role_label *role);
55598+
55599+#ifdef CONFIG_BTRFS_FS
55600+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
55601+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
55602+#endif
55603+
55604+static inline dev_t __get_dev(const struct dentry *dentry)
55605+{
55606+#ifdef CONFIG_BTRFS_FS
55607+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
55608+ return get_btrfs_dev_from_inode(dentry->d_inode);
55609+ else
55610+#endif
55611+ return dentry->d_inode->i_sb->s_dev;
55612+}
55613+
55614+int
55615+gr_init_uidset(void)
55616+{
55617+ uid_set =
55618+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
55619+ uid_used = 0;
55620+
55621+ return uid_set ? 1 : 0;
55622+}
55623+
55624+void
55625+gr_free_uidset(void)
55626+{
55627+ if (uid_set)
55628+ kfree(uid_set);
55629+
55630+ return;
55631+}
55632+
55633+int
55634+gr_find_uid(const uid_t uid)
55635+{
55636+ struct crash_uid *tmp = uid_set;
55637+ uid_t buid;
55638+ int low = 0, high = uid_used - 1, mid;
55639+
55640+ while (high >= low) {
55641+ mid = (low + high) >> 1;
55642+ buid = tmp[mid].uid;
55643+ if (buid == uid)
55644+ return mid;
55645+ if (buid > uid)
55646+ high = mid - 1;
55647+ if (buid < uid)
55648+ low = mid + 1;
55649+ }
55650+
55651+ return -1;
55652+}
55653+
55654+static __inline__ void
55655+gr_insertsort(void)
55656+{
55657+ unsigned short i, j;
55658+ struct crash_uid index;
55659+
55660+ for (i = 1; i < uid_used; i++) {
55661+ index = uid_set[i];
55662+ j = i;
55663+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
55664+ uid_set[j] = uid_set[j - 1];
55665+ j--;
55666+ }
55667+ uid_set[j] = index;
55668+ }
55669+
55670+ return;
55671+}
55672+
55673+static __inline__ void
55674+gr_insert_uid(const uid_t uid, const unsigned long expires)
55675+{
55676+ int loc;
55677+
55678+ if (uid_used == GR_UIDTABLE_MAX)
55679+ return;
55680+
55681+ loc = gr_find_uid(uid);
55682+
55683+ if (loc >= 0) {
55684+ uid_set[loc].expires = expires;
55685+ return;
55686+ }
55687+
55688+ uid_set[uid_used].uid = uid;
55689+ uid_set[uid_used].expires = expires;
55690+ uid_used++;
55691+
55692+ gr_insertsort();
55693+
55694+ return;
55695+}
55696+
55697+void
55698+gr_remove_uid(const unsigned short loc)
55699+{
55700+ unsigned short i;
55701+
55702+ for (i = loc + 1; i < uid_used; i++)
55703+ uid_set[i - 1] = uid_set[i];
55704+
55705+ uid_used--;
55706+
55707+ return;
55708+}
55709+
55710+int
55711+gr_check_crash_uid(const uid_t uid)
55712+{
55713+ int loc;
55714+ int ret = 0;
55715+
55716+ if (unlikely(!gr_acl_is_enabled()))
55717+ return 0;
55718+
55719+ spin_lock(&gr_uid_lock);
55720+ loc = gr_find_uid(uid);
55721+
55722+ if (loc < 0)
55723+ goto out_unlock;
55724+
55725+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
55726+ gr_remove_uid(loc);
55727+ else
55728+ ret = 1;
55729+
55730+out_unlock:
55731+ spin_unlock(&gr_uid_lock);
55732+ return ret;
55733+}
55734+
55735+static __inline__ int
55736+proc_is_setxid(const struct cred *cred)
55737+{
55738+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
55739+ cred->uid != cred->fsuid)
55740+ return 1;
55741+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
55742+ cred->gid != cred->fsgid)
55743+ return 1;
55744+
55745+ return 0;
55746+}
55747+
55748+extern int gr_fake_force_sig(int sig, struct task_struct *t);
55749+
55750+void
55751+gr_handle_crash(struct task_struct *task, const int sig)
55752+{
55753+ struct acl_subject_label *curr;
55754+ struct task_struct *tsk, *tsk2;
55755+ const struct cred *cred;
55756+ const struct cred *cred2;
55757+
55758+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
55759+ return;
55760+
55761+ if (unlikely(!gr_acl_is_enabled()))
55762+ return;
55763+
55764+ curr = task->acl;
55765+
55766+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
55767+ return;
55768+
55769+ if (time_before_eq(curr->expires, get_seconds())) {
55770+ curr->expires = 0;
55771+ curr->crashes = 0;
55772+ }
55773+
55774+ curr->crashes++;
55775+
55776+ if (!curr->expires)
55777+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
55778+
55779+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
55780+ time_after(curr->expires, get_seconds())) {
55781+ rcu_read_lock();
55782+ cred = __task_cred(task);
55783+ if (cred->uid && proc_is_setxid(cred)) {
55784+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
55785+ spin_lock(&gr_uid_lock);
55786+ gr_insert_uid(cred->uid, curr->expires);
55787+ spin_unlock(&gr_uid_lock);
55788+ curr->expires = 0;
55789+ curr->crashes = 0;
55790+ read_lock(&tasklist_lock);
55791+ do_each_thread(tsk2, tsk) {
55792+ cred2 = __task_cred(tsk);
55793+ if (tsk != task && cred2->uid == cred->uid)
55794+ gr_fake_force_sig(SIGKILL, tsk);
55795+ } while_each_thread(tsk2, tsk);
55796+ read_unlock(&tasklist_lock);
55797+ } else {
55798+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
55799+ read_lock(&tasklist_lock);
55800+ read_lock(&grsec_exec_file_lock);
55801+ do_each_thread(tsk2, tsk) {
55802+ if (likely(tsk != task)) {
55803+ // if this thread has the same subject as the one that triggered
55804+ // RES_CRASH and it's the same binary, kill it
55805+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
55806+ gr_fake_force_sig(SIGKILL, tsk);
55807+ }
55808+ } while_each_thread(tsk2, tsk);
55809+ read_unlock(&grsec_exec_file_lock);
55810+ read_unlock(&tasklist_lock);
55811+ }
55812+ rcu_read_unlock();
55813+ }
55814+
55815+ return;
55816+}
55817+
55818+int
55819+gr_check_crash_exec(const struct file *filp)
55820+{
55821+ struct acl_subject_label *curr;
55822+
55823+ if (unlikely(!gr_acl_is_enabled()))
55824+ return 0;
55825+
55826+ read_lock(&gr_inode_lock);
55827+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
55828+ __get_dev(filp->f_path.dentry),
55829+ current->role);
55830+ read_unlock(&gr_inode_lock);
55831+
55832+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
55833+ (!curr->crashes && !curr->expires))
55834+ return 0;
55835+
55836+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
55837+ time_after(curr->expires, get_seconds()))
55838+ return 1;
55839+ else if (time_before_eq(curr->expires, get_seconds())) {
55840+ curr->crashes = 0;
55841+ curr->expires = 0;
55842+ }
55843+
55844+ return 0;
55845+}
55846+
55847+void
55848+gr_handle_alertkill(struct task_struct *task)
55849+{
55850+ struct acl_subject_label *curracl;
55851+ __u32 curr_ip;
55852+ struct task_struct *p, *p2;
55853+
55854+ if (unlikely(!gr_acl_is_enabled()))
55855+ return;
55856+
55857+ curracl = task->acl;
55858+ curr_ip = task->signal->curr_ip;
55859+
55860+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
55861+ read_lock(&tasklist_lock);
55862+ do_each_thread(p2, p) {
55863+ if (p->signal->curr_ip == curr_ip)
55864+ gr_fake_force_sig(SIGKILL, p);
55865+ } while_each_thread(p2, p);
55866+ read_unlock(&tasklist_lock);
55867+ } else if (curracl->mode & GR_KILLPROC)
55868+ gr_fake_force_sig(SIGKILL, task);
55869+
55870+ return;
55871+}
55872diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
55873new file mode 100644
55874index 0000000..9d83a69
55875--- /dev/null
55876+++ b/grsecurity/gracl_shm.c
55877@@ -0,0 +1,40 @@
55878+#include <linux/kernel.h>
55879+#include <linux/mm.h>
55880+#include <linux/sched.h>
55881+#include <linux/file.h>
55882+#include <linux/ipc.h>
55883+#include <linux/gracl.h>
55884+#include <linux/grsecurity.h>
55885+#include <linux/grinternal.h>
55886+
55887+int
55888+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55889+ const time_t shm_createtime, const uid_t cuid, const int shmid)
55890+{
55891+ struct task_struct *task;
55892+
55893+ if (!gr_acl_is_enabled())
55894+ return 1;
55895+
55896+ rcu_read_lock();
55897+ read_lock(&tasklist_lock);
55898+
55899+ task = find_task_by_vpid(shm_cprid);
55900+
55901+ if (unlikely(!task))
55902+ task = find_task_by_vpid(shm_lapid);
55903+
55904+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
55905+ (task->pid == shm_lapid)) &&
55906+ (task->acl->mode & GR_PROTSHM) &&
55907+ (task->acl != current->acl))) {
55908+ read_unlock(&tasklist_lock);
55909+ rcu_read_unlock();
55910+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
55911+ return 0;
55912+ }
55913+ read_unlock(&tasklist_lock);
55914+ rcu_read_unlock();
55915+
55916+ return 1;
55917+}
55918diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
55919new file mode 100644
55920index 0000000..bc0be01
55921--- /dev/null
55922+++ b/grsecurity/grsec_chdir.c
55923@@ -0,0 +1,19 @@
55924+#include <linux/kernel.h>
55925+#include <linux/sched.h>
55926+#include <linux/fs.h>
55927+#include <linux/file.h>
55928+#include <linux/grsecurity.h>
55929+#include <linux/grinternal.h>
55930+
55931+void
55932+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
55933+{
55934+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55935+ if ((grsec_enable_chdir && grsec_enable_group &&
55936+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
55937+ !grsec_enable_group)) {
55938+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
55939+ }
55940+#endif
55941+ return;
55942+}
55943diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
55944new file mode 100644
55945index 0000000..9807ee2
55946--- /dev/null
55947+++ b/grsecurity/grsec_chroot.c
55948@@ -0,0 +1,368 @@
55949+#include <linux/kernel.h>
55950+#include <linux/module.h>
55951+#include <linux/sched.h>
55952+#include <linux/file.h>
55953+#include <linux/fs.h>
55954+#include <linux/mount.h>
55955+#include <linux/types.h>
55956+#include "../fs/mount.h"
55957+#include <linux/grsecurity.h>
55958+#include <linux/grinternal.h>
55959+
55960+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
55961+{
55962+#ifdef CONFIG_GRKERNSEC
55963+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
55964+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
55965+ task->gr_is_chrooted = 1;
55966+ else
55967+ task->gr_is_chrooted = 0;
55968+
55969+ task->gr_chroot_dentry = path->dentry;
55970+#endif
55971+ return;
55972+}
55973+
55974+void gr_clear_chroot_entries(struct task_struct *task)
55975+{
55976+#ifdef CONFIG_GRKERNSEC
55977+ task->gr_is_chrooted = 0;
55978+ task->gr_chroot_dentry = NULL;
55979+#endif
55980+ return;
55981+}
55982+
55983+int
55984+gr_handle_chroot_unix(const pid_t pid)
55985+{
55986+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55987+ struct task_struct *p;
55988+
55989+ if (unlikely(!grsec_enable_chroot_unix))
55990+ return 1;
55991+
55992+ if (likely(!proc_is_chrooted(current)))
55993+ return 1;
55994+
55995+ rcu_read_lock();
55996+ read_lock(&tasklist_lock);
55997+ p = find_task_by_vpid_unrestricted(pid);
55998+ if (unlikely(p && !have_same_root(current, p))) {
55999+ read_unlock(&tasklist_lock);
56000+ rcu_read_unlock();
56001+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
56002+ return 0;
56003+ }
56004+ read_unlock(&tasklist_lock);
56005+ rcu_read_unlock();
56006+#endif
56007+ return 1;
56008+}
56009+
56010+int
56011+gr_handle_chroot_nice(void)
56012+{
56013+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56014+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
56015+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
56016+ return -EPERM;
56017+ }
56018+#endif
56019+ return 0;
56020+}
56021+
56022+int
56023+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
56024+{
56025+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56026+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
56027+ && proc_is_chrooted(current)) {
56028+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
56029+ return -EACCES;
56030+ }
56031+#endif
56032+ return 0;
56033+}
56034+
56035+int
56036+gr_handle_chroot_rawio(const struct inode *inode)
56037+{
56038+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56039+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56040+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
56041+ return 1;
56042+#endif
56043+ return 0;
56044+}
56045+
56046+int
56047+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
56048+{
56049+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56050+ struct task_struct *p;
56051+ int ret = 0;
56052+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
56053+ return ret;
56054+
56055+ read_lock(&tasklist_lock);
56056+ do_each_pid_task(pid, type, p) {
56057+ if (!have_same_root(current, p)) {
56058+ ret = 1;
56059+ goto out;
56060+ }
56061+ } while_each_pid_task(pid, type, p);
56062+out:
56063+ read_unlock(&tasklist_lock);
56064+ return ret;
56065+#endif
56066+ return 0;
56067+}
56068+
56069+int
56070+gr_pid_is_chrooted(struct task_struct *p)
56071+{
56072+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56073+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
56074+ return 0;
56075+
56076+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
56077+ !have_same_root(current, p)) {
56078+ return 1;
56079+ }
56080+#endif
56081+ return 0;
56082+}
56083+
56084+EXPORT_SYMBOL(gr_pid_is_chrooted);
56085+
56086+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
56087+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
56088+{
56089+ struct path path, currentroot;
56090+ int ret = 0;
56091+
56092+ path.dentry = (struct dentry *)u_dentry;
56093+ path.mnt = (struct vfsmount *)u_mnt;
56094+ get_fs_root(current->fs, &currentroot);
56095+ if (path_is_under(&path, &currentroot))
56096+ ret = 1;
56097+ path_put(&currentroot);
56098+
56099+ return ret;
56100+}
56101+#endif
56102+
56103+int
56104+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
56105+{
56106+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56107+ if (!grsec_enable_chroot_fchdir)
56108+ return 1;
56109+
56110+ if (!proc_is_chrooted(current))
56111+ return 1;
56112+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
56113+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
56114+ return 0;
56115+ }
56116+#endif
56117+ return 1;
56118+}
56119+
56120+int
56121+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56122+ const time_t shm_createtime)
56123+{
56124+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56125+ struct task_struct *p;
56126+ time_t starttime;
56127+
56128+ if (unlikely(!grsec_enable_chroot_shmat))
56129+ return 1;
56130+
56131+ if (likely(!proc_is_chrooted(current)))
56132+ return 1;
56133+
56134+ rcu_read_lock();
56135+ read_lock(&tasklist_lock);
56136+
56137+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
56138+ starttime = p->start_time.tv_sec;
56139+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
56140+ if (have_same_root(current, p)) {
56141+ goto allow;
56142+ } else {
56143+ read_unlock(&tasklist_lock);
56144+ rcu_read_unlock();
56145+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56146+ return 0;
56147+ }
56148+ }
56149+ /* creator exited, pid reuse, fall through to next check */
56150+ }
56151+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
56152+ if (unlikely(!have_same_root(current, p))) {
56153+ read_unlock(&tasklist_lock);
56154+ rcu_read_unlock();
56155+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56156+ return 0;
56157+ }
56158+ }
56159+
56160+allow:
56161+ read_unlock(&tasklist_lock);
56162+ rcu_read_unlock();
56163+#endif
56164+ return 1;
56165+}
56166+
56167+void
56168+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
56169+{
56170+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56171+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
56172+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
56173+#endif
56174+ return;
56175+}
56176+
56177+int
56178+gr_handle_chroot_mknod(const struct dentry *dentry,
56179+ const struct vfsmount *mnt, const int mode)
56180+{
56181+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56182+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
56183+ proc_is_chrooted(current)) {
56184+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
56185+ return -EPERM;
56186+ }
56187+#endif
56188+ return 0;
56189+}
56190+
56191+int
56192+gr_handle_chroot_mount(const struct dentry *dentry,
56193+ const struct vfsmount *mnt, const char *dev_name)
56194+{
56195+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56196+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
56197+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
56198+ return -EPERM;
56199+ }
56200+#endif
56201+ return 0;
56202+}
56203+
56204+int
56205+gr_handle_chroot_pivot(void)
56206+{
56207+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56208+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
56209+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
56210+ return -EPERM;
56211+ }
56212+#endif
56213+ return 0;
56214+}
56215+
56216+int
56217+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
56218+{
56219+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56220+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
56221+ !gr_is_outside_chroot(dentry, mnt)) {
56222+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
56223+ return -EPERM;
56224+ }
56225+#endif
56226+ return 0;
56227+}
56228+
56229+extern const char *captab_log[];
56230+extern int captab_log_entries;
56231+
56232+int
56233+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
56234+{
56235+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56236+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
56237+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56238+ if (cap_raised(chroot_caps, cap)) {
56239+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
56240+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
56241+ }
56242+ return 0;
56243+ }
56244+ }
56245+#endif
56246+ return 1;
56247+}
56248+
56249+int
56250+gr_chroot_is_capable(const int cap)
56251+{
56252+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56253+ return gr_task_chroot_is_capable(current, current_cred(), cap);
56254+#endif
56255+ return 1;
56256+}
56257+
56258+int
56259+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
56260+{
56261+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56262+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
56263+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56264+ if (cap_raised(chroot_caps, cap)) {
56265+ return 0;
56266+ }
56267+ }
56268+#endif
56269+ return 1;
56270+}
56271+
56272+int
56273+gr_chroot_is_capable_nolog(const int cap)
56274+{
56275+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56276+ return gr_task_chroot_is_capable_nolog(current, cap);
56277+#endif
56278+ return 1;
56279+}
56280+
56281+int
56282+gr_handle_chroot_sysctl(const int op)
56283+{
56284+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56285+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
56286+ proc_is_chrooted(current))
56287+ return -EACCES;
56288+#endif
56289+ return 0;
56290+}
56291+
56292+void
56293+gr_handle_chroot_chdir(struct path *path)
56294+{
56295+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56296+ if (grsec_enable_chroot_chdir)
56297+ set_fs_pwd(current->fs, path);
56298+#endif
56299+ return;
56300+}
56301+
56302+int
56303+gr_handle_chroot_chmod(const struct dentry *dentry,
56304+ const struct vfsmount *mnt, const int mode)
56305+{
56306+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56307+ /* allow chmod +s on directories, but not files */
56308+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
56309+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
56310+ proc_is_chrooted(current)) {
56311+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
56312+ return -EPERM;
56313+ }
56314+#endif
56315+ return 0;
56316+}
56317diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
56318new file mode 100644
56319index 0000000..213ad8b
56320--- /dev/null
56321+++ b/grsecurity/grsec_disabled.c
56322@@ -0,0 +1,437 @@
56323+#include <linux/kernel.h>
56324+#include <linux/module.h>
56325+#include <linux/sched.h>
56326+#include <linux/file.h>
56327+#include <linux/fs.h>
56328+#include <linux/kdev_t.h>
56329+#include <linux/net.h>
56330+#include <linux/in.h>
56331+#include <linux/ip.h>
56332+#include <linux/skbuff.h>
56333+#include <linux/sysctl.h>
56334+
56335+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56336+void
56337+pax_set_initial_flags(struct linux_binprm *bprm)
56338+{
56339+ return;
56340+}
56341+#endif
56342+
56343+#ifdef CONFIG_SYSCTL
56344+__u32
56345+gr_handle_sysctl(const struct ctl_table * table, const int op)
56346+{
56347+ return 0;
56348+}
56349+#endif
56350+
56351+#ifdef CONFIG_TASKSTATS
56352+int gr_is_taskstats_denied(int pid)
56353+{
56354+ return 0;
56355+}
56356+#endif
56357+
56358+int
56359+gr_acl_is_enabled(void)
56360+{
56361+ return 0;
56362+}
56363+
56364+void
56365+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
56366+{
56367+ return;
56368+}
56369+
56370+int
56371+gr_handle_rawio(const struct inode *inode)
56372+{
56373+ return 0;
56374+}
56375+
56376+void
56377+gr_acl_handle_psacct(struct task_struct *task, const long code)
56378+{
56379+ return;
56380+}
56381+
56382+int
56383+gr_handle_ptrace(struct task_struct *task, const long request)
56384+{
56385+ return 0;
56386+}
56387+
56388+int
56389+gr_handle_proc_ptrace(struct task_struct *task)
56390+{
56391+ return 0;
56392+}
56393+
56394+void
56395+gr_learn_resource(const struct task_struct *task,
56396+ const int res, const unsigned long wanted, const int gt)
56397+{
56398+ return;
56399+}
56400+
56401+int
56402+gr_set_acls(const int type)
56403+{
56404+ return 0;
56405+}
56406+
56407+int
56408+gr_check_hidden_task(const struct task_struct *tsk)
56409+{
56410+ return 0;
56411+}
56412+
56413+int
56414+gr_check_protected_task(const struct task_struct *task)
56415+{
56416+ return 0;
56417+}
56418+
56419+int
56420+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
56421+{
56422+ return 0;
56423+}
56424+
56425+void
56426+gr_copy_label(struct task_struct *tsk)
56427+{
56428+ return;
56429+}
56430+
56431+void
56432+gr_set_pax_flags(struct task_struct *task)
56433+{
56434+ return;
56435+}
56436+
56437+int
56438+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
56439+ const int unsafe_share)
56440+{
56441+ return 0;
56442+}
56443+
56444+void
56445+gr_handle_delete(const ino_t ino, const dev_t dev)
56446+{
56447+ return;
56448+}
56449+
56450+void
56451+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
56452+{
56453+ return;
56454+}
56455+
56456+void
56457+gr_handle_crash(struct task_struct *task, const int sig)
56458+{
56459+ return;
56460+}
56461+
56462+int
56463+gr_check_crash_exec(const struct file *filp)
56464+{
56465+ return 0;
56466+}
56467+
56468+int
56469+gr_check_crash_uid(const uid_t uid)
56470+{
56471+ return 0;
56472+}
56473+
56474+void
56475+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56476+ struct dentry *old_dentry,
56477+ struct dentry *new_dentry,
56478+ struct vfsmount *mnt, const __u8 replace)
56479+{
56480+ return;
56481+}
56482+
56483+int
56484+gr_search_socket(const int family, const int type, const int protocol)
56485+{
56486+ return 1;
56487+}
56488+
56489+int
56490+gr_search_connectbind(const int mode, const struct socket *sock,
56491+ const struct sockaddr_in *addr)
56492+{
56493+ return 0;
56494+}
56495+
56496+void
56497+gr_handle_alertkill(struct task_struct *task)
56498+{
56499+ return;
56500+}
56501+
56502+__u32
56503+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
56504+{
56505+ return 1;
56506+}
56507+
56508+__u32
56509+gr_acl_handle_hidden_file(const struct dentry * dentry,
56510+ const struct vfsmount * mnt)
56511+{
56512+ return 1;
56513+}
56514+
56515+__u32
56516+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
56517+ int acc_mode)
56518+{
56519+ return 1;
56520+}
56521+
56522+__u32
56523+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
56524+{
56525+ return 1;
56526+}
56527+
56528+__u32
56529+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
56530+{
56531+ return 1;
56532+}
56533+
56534+int
56535+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
56536+ unsigned int *vm_flags)
56537+{
56538+ return 1;
56539+}
56540+
56541+__u32
56542+gr_acl_handle_truncate(const struct dentry * dentry,
56543+ const struct vfsmount * mnt)
56544+{
56545+ return 1;
56546+}
56547+
56548+__u32
56549+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
56550+{
56551+ return 1;
56552+}
56553+
56554+__u32
56555+gr_acl_handle_access(const struct dentry * dentry,
56556+ const struct vfsmount * mnt, const int fmode)
56557+{
56558+ return 1;
56559+}
56560+
56561+__u32
56562+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
56563+ umode_t *mode)
56564+{
56565+ return 1;
56566+}
56567+
56568+__u32
56569+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
56570+{
56571+ return 1;
56572+}
56573+
56574+__u32
56575+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
56576+{
56577+ return 1;
56578+}
56579+
56580+void
56581+grsecurity_init(void)
56582+{
56583+ return;
56584+}
56585+
56586+umode_t gr_acl_umask(void)
56587+{
56588+ return 0;
56589+}
56590+
56591+__u32
56592+gr_acl_handle_mknod(const struct dentry * new_dentry,
56593+ const struct dentry * parent_dentry,
56594+ const struct vfsmount * parent_mnt,
56595+ const int mode)
56596+{
56597+ return 1;
56598+}
56599+
56600+__u32
56601+gr_acl_handle_mkdir(const struct dentry * new_dentry,
56602+ const struct dentry * parent_dentry,
56603+ const struct vfsmount * parent_mnt)
56604+{
56605+ return 1;
56606+}
56607+
56608+__u32
56609+gr_acl_handle_symlink(const struct dentry * new_dentry,
56610+ const struct dentry * parent_dentry,
56611+ const struct vfsmount * parent_mnt, const char *from)
56612+{
56613+ return 1;
56614+}
56615+
56616+__u32
56617+gr_acl_handle_link(const struct dentry * new_dentry,
56618+ const struct dentry * parent_dentry,
56619+ const struct vfsmount * parent_mnt,
56620+ const struct dentry * old_dentry,
56621+ const struct vfsmount * old_mnt, const char *to)
56622+{
56623+ return 1;
56624+}
56625+
56626+int
56627+gr_acl_handle_rename(const struct dentry *new_dentry,
56628+ const struct dentry *parent_dentry,
56629+ const struct vfsmount *parent_mnt,
56630+ const struct dentry *old_dentry,
56631+ const struct inode *old_parent_inode,
56632+ const struct vfsmount *old_mnt, const char *newname)
56633+{
56634+ return 0;
56635+}
56636+
56637+int
56638+gr_acl_handle_filldir(const struct file *file, const char *name,
56639+ const int namelen, const ino_t ino)
56640+{
56641+ return 1;
56642+}
56643+
56644+int
56645+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56646+ const time_t shm_createtime, const uid_t cuid, const int shmid)
56647+{
56648+ return 1;
56649+}
56650+
56651+int
56652+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
56653+{
56654+ return 0;
56655+}
56656+
56657+int
56658+gr_search_accept(const struct socket *sock)
56659+{
56660+ return 0;
56661+}
56662+
56663+int
56664+gr_search_listen(const struct socket *sock)
56665+{
56666+ return 0;
56667+}
56668+
56669+int
56670+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
56671+{
56672+ return 0;
56673+}
56674+
56675+__u32
56676+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
56677+{
56678+ return 1;
56679+}
56680+
56681+__u32
56682+gr_acl_handle_creat(const struct dentry * dentry,
56683+ const struct dentry * p_dentry,
56684+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
56685+ const int imode)
56686+{
56687+ return 1;
56688+}
56689+
56690+void
56691+gr_acl_handle_exit(void)
56692+{
56693+ return;
56694+}
56695+
56696+int
56697+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
56698+{
56699+ return 1;
56700+}
56701+
56702+void
56703+gr_set_role_label(const uid_t uid, const gid_t gid)
56704+{
56705+ return;
56706+}
56707+
56708+int
56709+gr_acl_handle_procpidmem(const struct task_struct *task)
56710+{
56711+ return 0;
56712+}
56713+
56714+int
56715+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
56716+{
56717+ return 0;
56718+}
56719+
56720+int
56721+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
56722+{
56723+ return 0;
56724+}
56725+
56726+void
56727+gr_set_kernel_label(struct task_struct *task)
56728+{
56729+ return;
56730+}
56731+
56732+int
56733+gr_check_user_change(int real, int effective, int fs)
56734+{
56735+ return 0;
56736+}
56737+
56738+int
56739+gr_check_group_change(int real, int effective, int fs)
56740+{
56741+ return 0;
56742+}
56743+
56744+int gr_acl_enable_at_secure(void)
56745+{
56746+ return 0;
56747+}
56748+
56749+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56750+{
56751+ return dentry->d_inode->i_sb->s_dev;
56752+}
56753+
56754+EXPORT_SYMBOL(gr_learn_resource);
56755+EXPORT_SYMBOL(gr_set_kernel_label);
56756+#ifdef CONFIG_SECURITY
56757+EXPORT_SYMBOL(gr_check_user_change);
56758+EXPORT_SYMBOL(gr_check_group_change);
56759+#endif
56760diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
56761new file mode 100644
56762index 0000000..abfa971
56763--- /dev/null
56764+++ b/grsecurity/grsec_exec.c
56765@@ -0,0 +1,174 @@
56766+#include <linux/kernel.h>
56767+#include <linux/sched.h>
56768+#include <linux/file.h>
56769+#include <linux/binfmts.h>
56770+#include <linux/fs.h>
56771+#include <linux/types.h>
56772+#include <linux/grdefs.h>
56773+#include <linux/grsecurity.h>
56774+#include <linux/grinternal.h>
56775+#include <linux/capability.h>
56776+#include <linux/module.h>
56777+
56778+#include <asm/uaccess.h>
56779+
56780+#ifdef CONFIG_GRKERNSEC_EXECLOG
56781+static char gr_exec_arg_buf[132];
56782+static DEFINE_MUTEX(gr_exec_arg_mutex);
56783+#endif
56784+
56785+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
56786+
56787+void
56788+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
56789+{
56790+#ifdef CONFIG_GRKERNSEC_EXECLOG
56791+ char *grarg = gr_exec_arg_buf;
56792+ unsigned int i, x, execlen = 0;
56793+ char c;
56794+
56795+ if (!((grsec_enable_execlog && grsec_enable_group &&
56796+ in_group_p(grsec_audit_gid))
56797+ || (grsec_enable_execlog && !grsec_enable_group)))
56798+ return;
56799+
56800+ mutex_lock(&gr_exec_arg_mutex);
56801+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
56802+
56803+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
56804+ const char __user *p;
56805+ unsigned int len;
56806+
56807+ p = get_user_arg_ptr(argv, i);
56808+ if (IS_ERR(p))
56809+ goto log;
56810+
56811+ len = strnlen_user(p, 128 - execlen);
56812+ if (len > 128 - execlen)
56813+ len = 128 - execlen;
56814+ else if (len > 0)
56815+ len--;
56816+ if (copy_from_user(grarg + execlen, p, len))
56817+ goto log;
56818+
56819+ /* rewrite unprintable characters */
56820+ for (x = 0; x < len; x++) {
56821+ c = *(grarg + execlen + x);
56822+ if (c < 32 || c > 126)
56823+ *(grarg + execlen + x) = ' ';
56824+ }
56825+
56826+ execlen += len;
56827+ *(grarg + execlen) = ' ';
56828+ *(grarg + execlen + 1) = '\0';
56829+ execlen++;
56830+ }
56831+
56832+ log:
56833+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
56834+ bprm->file->f_path.mnt, grarg);
56835+ mutex_unlock(&gr_exec_arg_mutex);
56836+#endif
56837+ return;
56838+}
56839+
56840+#ifdef CONFIG_GRKERNSEC
56841+extern int gr_acl_is_capable(const int cap);
56842+extern int gr_acl_is_capable_nolog(const int cap);
56843+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
56844+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
56845+extern int gr_chroot_is_capable(const int cap);
56846+extern int gr_chroot_is_capable_nolog(const int cap);
56847+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
56848+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
56849+#endif
56850+
56851+const char *captab_log[] = {
56852+ "CAP_CHOWN",
56853+ "CAP_DAC_OVERRIDE",
56854+ "CAP_DAC_READ_SEARCH",
56855+ "CAP_FOWNER",
56856+ "CAP_FSETID",
56857+ "CAP_KILL",
56858+ "CAP_SETGID",
56859+ "CAP_SETUID",
56860+ "CAP_SETPCAP",
56861+ "CAP_LINUX_IMMUTABLE",
56862+ "CAP_NET_BIND_SERVICE",
56863+ "CAP_NET_BROADCAST",
56864+ "CAP_NET_ADMIN",
56865+ "CAP_NET_RAW",
56866+ "CAP_IPC_LOCK",
56867+ "CAP_IPC_OWNER",
56868+ "CAP_SYS_MODULE",
56869+ "CAP_SYS_RAWIO",
56870+ "CAP_SYS_CHROOT",
56871+ "CAP_SYS_PTRACE",
56872+ "CAP_SYS_PACCT",
56873+ "CAP_SYS_ADMIN",
56874+ "CAP_SYS_BOOT",
56875+ "CAP_SYS_NICE",
56876+ "CAP_SYS_RESOURCE",
56877+ "CAP_SYS_TIME",
56878+ "CAP_SYS_TTY_CONFIG",
56879+ "CAP_MKNOD",
56880+ "CAP_LEASE",
56881+ "CAP_AUDIT_WRITE",
56882+ "CAP_AUDIT_CONTROL",
56883+ "CAP_SETFCAP",
56884+ "CAP_MAC_OVERRIDE",
56885+ "CAP_MAC_ADMIN",
56886+ "CAP_SYSLOG",
56887+ "CAP_WAKE_ALARM"
56888+};
56889+
56890+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
56891+
56892+int gr_is_capable(const int cap)
56893+{
56894+#ifdef CONFIG_GRKERNSEC
56895+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
56896+ return 1;
56897+ return 0;
56898+#else
56899+ return 1;
56900+#endif
56901+}
56902+
56903+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
56904+{
56905+#ifdef CONFIG_GRKERNSEC
56906+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
56907+ return 1;
56908+ return 0;
56909+#else
56910+ return 1;
56911+#endif
56912+}
56913+
56914+int gr_is_capable_nolog(const int cap)
56915+{
56916+#ifdef CONFIG_GRKERNSEC
56917+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
56918+ return 1;
56919+ return 0;
56920+#else
56921+ return 1;
56922+#endif
56923+}
56924+
56925+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
56926+{
56927+#ifdef CONFIG_GRKERNSEC
56928+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
56929+ return 1;
56930+ return 0;
56931+#else
56932+ return 1;
56933+#endif
56934+}
56935+
56936+EXPORT_SYMBOL(gr_is_capable);
56937+EXPORT_SYMBOL(gr_is_capable_nolog);
56938+EXPORT_SYMBOL(gr_task_is_capable);
56939+EXPORT_SYMBOL(gr_task_is_capable_nolog);
56940diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
56941new file mode 100644
56942index 0000000..d3ee748
56943--- /dev/null
56944+++ b/grsecurity/grsec_fifo.c
56945@@ -0,0 +1,24 @@
56946+#include <linux/kernel.h>
56947+#include <linux/sched.h>
56948+#include <linux/fs.h>
56949+#include <linux/file.h>
56950+#include <linux/grinternal.h>
56951+
56952+int
56953+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
56954+ const struct dentry *dir, const int flag, const int acc_mode)
56955+{
56956+#ifdef CONFIG_GRKERNSEC_FIFO
56957+ const struct cred *cred = current_cred();
56958+
56959+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
56960+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
56961+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
56962+ (cred->fsuid != dentry->d_inode->i_uid)) {
56963+ if (!inode_permission(dentry->d_inode, acc_mode))
56964+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
56965+ return -EACCES;
56966+ }
56967+#endif
56968+ return 0;
56969+}
56970diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
56971new file mode 100644
56972index 0000000..8ca18bf
56973--- /dev/null
56974+++ b/grsecurity/grsec_fork.c
56975@@ -0,0 +1,23 @@
56976+#include <linux/kernel.h>
56977+#include <linux/sched.h>
56978+#include <linux/grsecurity.h>
56979+#include <linux/grinternal.h>
56980+#include <linux/errno.h>
56981+
56982+void
56983+gr_log_forkfail(const int retval)
56984+{
56985+#ifdef CONFIG_GRKERNSEC_FORKFAIL
56986+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
56987+ switch (retval) {
56988+ case -EAGAIN:
56989+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
56990+ break;
56991+ case -ENOMEM:
56992+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
56993+ break;
56994+ }
56995+ }
56996+#endif
56997+ return;
56998+}
56999diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
57000new file mode 100644
57001index 0000000..01ddde4
57002--- /dev/null
57003+++ b/grsecurity/grsec_init.c
57004@@ -0,0 +1,277 @@
57005+#include <linux/kernel.h>
57006+#include <linux/sched.h>
57007+#include <linux/mm.h>
57008+#include <linux/gracl.h>
57009+#include <linux/slab.h>
57010+#include <linux/vmalloc.h>
57011+#include <linux/percpu.h>
57012+#include <linux/module.h>
57013+
57014+int grsec_enable_ptrace_readexec;
57015+int grsec_enable_setxid;
57016+int grsec_enable_brute;
57017+int grsec_enable_link;
57018+int grsec_enable_dmesg;
57019+int grsec_enable_harden_ptrace;
57020+int grsec_enable_fifo;
57021+int grsec_enable_execlog;
57022+int grsec_enable_signal;
57023+int grsec_enable_forkfail;
57024+int grsec_enable_audit_ptrace;
57025+int grsec_enable_time;
57026+int grsec_enable_audit_textrel;
57027+int grsec_enable_group;
57028+int grsec_audit_gid;
57029+int grsec_enable_chdir;
57030+int grsec_enable_mount;
57031+int grsec_enable_rofs;
57032+int grsec_enable_chroot_findtask;
57033+int grsec_enable_chroot_mount;
57034+int grsec_enable_chroot_shmat;
57035+int grsec_enable_chroot_fchdir;
57036+int grsec_enable_chroot_double;
57037+int grsec_enable_chroot_pivot;
57038+int grsec_enable_chroot_chdir;
57039+int grsec_enable_chroot_chmod;
57040+int grsec_enable_chroot_mknod;
57041+int grsec_enable_chroot_nice;
57042+int grsec_enable_chroot_execlog;
57043+int grsec_enable_chroot_caps;
57044+int grsec_enable_chroot_sysctl;
57045+int grsec_enable_chroot_unix;
57046+int grsec_enable_tpe;
57047+int grsec_tpe_gid;
57048+int grsec_enable_blackhole;
57049+#ifdef CONFIG_IPV6_MODULE
57050+EXPORT_SYMBOL(grsec_enable_blackhole);
57051+#endif
57052+int grsec_lastack_retries;
57053+int grsec_enable_tpe_all;
57054+int grsec_enable_tpe_invert;
57055+int grsec_enable_socket_all;
57056+int grsec_socket_all_gid;
57057+int grsec_enable_socket_client;
57058+int grsec_socket_client_gid;
57059+int grsec_enable_socket_server;
57060+int grsec_socket_server_gid;
57061+int grsec_resource_logging;
57062+int grsec_disable_privio;
57063+int grsec_enable_log_rwxmaps;
57064+int grsec_lock;
57065+
57066+DEFINE_SPINLOCK(grsec_alert_lock);
57067+unsigned long grsec_alert_wtime = 0;
57068+unsigned long grsec_alert_fyet = 0;
57069+
57070+DEFINE_SPINLOCK(grsec_audit_lock);
57071+
57072+DEFINE_RWLOCK(grsec_exec_file_lock);
57073+
57074+char *gr_shared_page[4];
57075+
57076+char *gr_alert_log_fmt;
57077+char *gr_audit_log_fmt;
57078+char *gr_alert_log_buf;
57079+char *gr_audit_log_buf;
57080+
57081+extern struct gr_arg *gr_usermode;
57082+extern unsigned char *gr_system_salt;
57083+extern unsigned char *gr_system_sum;
57084+
57085+void __init
57086+grsecurity_init(void)
57087+{
57088+ int j;
57089+ /* create the per-cpu shared pages */
57090+
57091+#ifdef CONFIG_X86
57092+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
57093+#endif
57094+
57095+ for (j = 0; j < 4; j++) {
57096+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
57097+ if (gr_shared_page[j] == NULL) {
57098+ panic("Unable to allocate grsecurity shared page");
57099+ return;
57100+ }
57101+ }
57102+
57103+ /* allocate log buffers */
57104+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
57105+ if (!gr_alert_log_fmt) {
57106+ panic("Unable to allocate grsecurity alert log format buffer");
57107+ return;
57108+ }
57109+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
57110+ if (!gr_audit_log_fmt) {
57111+ panic("Unable to allocate grsecurity audit log format buffer");
57112+ return;
57113+ }
57114+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57115+ if (!gr_alert_log_buf) {
57116+ panic("Unable to allocate grsecurity alert log buffer");
57117+ return;
57118+ }
57119+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57120+ if (!gr_audit_log_buf) {
57121+ panic("Unable to allocate grsecurity audit log buffer");
57122+ return;
57123+ }
57124+
57125+ /* allocate memory for authentication structure */
57126+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
57127+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
57128+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
57129+
57130+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
57131+ panic("Unable to allocate grsecurity authentication structure");
57132+ return;
57133+ }
57134+
57135+
57136+#ifdef CONFIG_GRKERNSEC_IO
57137+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
57138+ grsec_disable_privio = 1;
57139+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57140+ grsec_disable_privio = 1;
57141+#else
57142+ grsec_disable_privio = 0;
57143+#endif
57144+#endif
57145+
57146+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
57147+ /* for backward compatibility, tpe_invert always defaults to on if
57148+ enabled in the kernel
57149+ */
57150+ grsec_enable_tpe_invert = 1;
57151+#endif
57152+
57153+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57154+#ifndef CONFIG_GRKERNSEC_SYSCTL
57155+ grsec_lock = 1;
57156+#endif
57157+
57158+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57159+ grsec_enable_audit_textrel = 1;
57160+#endif
57161+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57162+ grsec_enable_log_rwxmaps = 1;
57163+#endif
57164+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
57165+ grsec_enable_group = 1;
57166+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
57167+#endif
57168+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57169+ grsec_enable_ptrace_readexec = 1;
57170+#endif
57171+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57172+ grsec_enable_chdir = 1;
57173+#endif
57174+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57175+ grsec_enable_harden_ptrace = 1;
57176+#endif
57177+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57178+ grsec_enable_mount = 1;
57179+#endif
57180+#ifdef CONFIG_GRKERNSEC_LINK
57181+ grsec_enable_link = 1;
57182+#endif
57183+#ifdef CONFIG_GRKERNSEC_BRUTE
57184+ grsec_enable_brute = 1;
57185+#endif
57186+#ifdef CONFIG_GRKERNSEC_DMESG
57187+ grsec_enable_dmesg = 1;
57188+#endif
57189+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
57190+ grsec_enable_blackhole = 1;
57191+ grsec_lastack_retries = 4;
57192+#endif
57193+#ifdef CONFIG_GRKERNSEC_FIFO
57194+ grsec_enable_fifo = 1;
57195+#endif
57196+#ifdef CONFIG_GRKERNSEC_EXECLOG
57197+ grsec_enable_execlog = 1;
57198+#endif
57199+#ifdef CONFIG_GRKERNSEC_SETXID
57200+ grsec_enable_setxid = 1;
57201+#endif
57202+#ifdef CONFIG_GRKERNSEC_SIGNAL
57203+ grsec_enable_signal = 1;
57204+#endif
57205+#ifdef CONFIG_GRKERNSEC_FORKFAIL
57206+ grsec_enable_forkfail = 1;
57207+#endif
57208+#ifdef CONFIG_GRKERNSEC_TIME
57209+ grsec_enable_time = 1;
57210+#endif
57211+#ifdef CONFIG_GRKERNSEC_RESLOG
57212+ grsec_resource_logging = 1;
57213+#endif
57214+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57215+ grsec_enable_chroot_findtask = 1;
57216+#endif
57217+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57218+ grsec_enable_chroot_unix = 1;
57219+#endif
57220+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57221+ grsec_enable_chroot_mount = 1;
57222+#endif
57223+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57224+ grsec_enable_chroot_fchdir = 1;
57225+#endif
57226+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57227+ grsec_enable_chroot_shmat = 1;
57228+#endif
57229+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57230+ grsec_enable_audit_ptrace = 1;
57231+#endif
57232+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57233+ grsec_enable_chroot_double = 1;
57234+#endif
57235+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57236+ grsec_enable_chroot_pivot = 1;
57237+#endif
57238+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57239+ grsec_enable_chroot_chdir = 1;
57240+#endif
57241+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57242+ grsec_enable_chroot_chmod = 1;
57243+#endif
57244+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57245+ grsec_enable_chroot_mknod = 1;
57246+#endif
57247+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57248+ grsec_enable_chroot_nice = 1;
57249+#endif
57250+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57251+ grsec_enable_chroot_execlog = 1;
57252+#endif
57253+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57254+ grsec_enable_chroot_caps = 1;
57255+#endif
57256+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57257+ grsec_enable_chroot_sysctl = 1;
57258+#endif
57259+#ifdef CONFIG_GRKERNSEC_TPE
57260+ grsec_enable_tpe = 1;
57261+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
57262+#ifdef CONFIG_GRKERNSEC_TPE_ALL
57263+ grsec_enable_tpe_all = 1;
57264+#endif
57265+#endif
57266+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
57267+ grsec_enable_socket_all = 1;
57268+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
57269+#endif
57270+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
57271+ grsec_enable_socket_client = 1;
57272+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
57273+#endif
57274+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57275+ grsec_enable_socket_server = 1;
57276+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
57277+#endif
57278+#endif
57279+
57280+ return;
57281+}
57282diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
57283new file mode 100644
57284index 0000000..3efe141
57285--- /dev/null
57286+++ b/grsecurity/grsec_link.c
57287@@ -0,0 +1,43 @@
57288+#include <linux/kernel.h>
57289+#include <linux/sched.h>
57290+#include <linux/fs.h>
57291+#include <linux/file.h>
57292+#include <linux/grinternal.h>
57293+
57294+int
57295+gr_handle_follow_link(const struct inode *parent,
57296+ const struct inode *inode,
57297+ const struct dentry *dentry, const struct vfsmount *mnt)
57298+{
57299+#ifdef CONFIG_GRKERNSEC_LINK
57300+ const struct cred *cred = current_cred();
57301+
57302+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
57303+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
57304+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
57305+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
57306+ return -EACCES;
57307+ }
57308+#endif
57309+ return 0;
57310+}
57311+
57312+int
57313+gr_handle_hardlink(const struct dentry *dentry,
57314+ const struct vfsmount *mnt,
57315+ struct inode *inode, const int mode, const char *to)
57316+{
57317+#ifdef CONFIG_GRKERNSEC_LINK
57318+ const struct cred *cred = current_cred();
57319+
57320+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
57321+ (!S_ISREG(mode) || (mode & S_ISUID) ||
57322+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
57323+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
57324+ !capable(CAP_FOWNER) && cred->uid) {
57325+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
57326+ return -EPERM;
57327+ }
57328+#endif
57329+ return 0;
57330+}
57331diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
57332new file mode 100644
57333index 0000000..a45d2e9
57334--- /dev/null
57335+++ b/grsecurity/grsec_log.c
57336@@ -0,0 +1,322 @@
57337+#include <linux/kernel.h>
57338+#include <linux/sched.h>
57339+#include <linux/file.h>
57340+#include <linux/tty.h>
57341+#include <linux/fs.h>
57342+#include <linux/grinternal.h>
57343+
57344+#ifdef CONFIG_TREE_PREEMPT_RCU
57345+#define DISABLE_PREEMPT() preempt_disable()
57346+#define ENABLE_PREEMPT() preempt_enable()
57347+#else
57348+#define DISABLE_PREEMPT()
57349+#define ENABLE_PREEMPT()
57350+#endif
57351+
57352+#define BEGIN_LOCKS(x) \
57353+ DISABLE_PREEMPT(); \
57354+ rcu_read_lock(); \
57355+ read_lock(&tasklist_lock); \
57356+ read_lock(&grsec_exec_file_lock); \
57357+ if (x != GR_DO_AUDIT) \
57358+ spin_lock(&grsec_alert_lock); \
57359+ else \
57360+ spin_lock(&grsec_audit_lock)
57361+
57362+#define END_LOCKS(x) \
57363+ if (x != GR_DO_AUDIT) \
57364+ spin_unlock(&grsec_alert_lock); \
57365+ else \
57366+ spin_unlock(&grsec_audit_lock); \
57367+ read_unlock(&grsec_exec_file_lock); \
57368+ read_unlock(&tasklist_lock); \
57369+ rcu_read_unlock(); \
57370+ ENABLE_PREEMPT(); \
57371+ if (x == GR_DONT_AUDIT) \
57372+ gr_handle_alertkill(current)
57373+
57374+enum {
57375+ FLOODING,
57376+ NO_FLOODING
57377+};
57378+
57379+extern char *gr_alert_log_fmt;
57380+extern char *gr_audit_log_fmt;
57381+extern char *gr_alert_log_buf;
57382+extern char *gr_audit_log_buf;
57383+
57384+static int gr_log_start(int audit)
57385+{
57386+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
57387+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
57388+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57389+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
57390+ unsigned long curr_secs = get_seconds();
57391+
57392+ if (audit == GR_DO_AUDIT)
57393+ goto set_fmt;
57394+
57395+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
57396+ grsec_alert_wtime = curr_secs;
57397+ grsec_alert_fyet = 0;
57398+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
57399+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
57400+ grsec_alert_fyet++;
57401+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
57402+ grsec_alert_wtime = curr_secs;
57403+ grsec_alert_fyet++;
57404+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
57405+ return FLOODING;
57406+ }
57407+ else return FLOODING;
57408+
57409+set_fmt:
57410+#endif
57411+ memset(buf, 0, PAGE_SIZE);
57412+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
57413+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
57414+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57415+ } else if (current->signal->curr_ip) {
57416+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
57417+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
57418+ } else if (gr_acl_is_enabled()) {
57419+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
57420+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57421+ } else {
57422+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
57423+ strcpy(buf, fmt);
57424+ }
57425+
57426+ return NO_FLOODING;
57427+}
57428+
57429+static void gr_log_middle(int audit, const char *msg, va_list ap)
57430+ __attribute__ ((format (printf, 2, 0)));
57431+
57432+static void gr_log_middle(int audit, const char *msg, va_list ap)
57433+{
57434+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57435+ unsigned int len = strlen(buf);
57436+
57437+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57438+
57439+ return;
57440+}
57441+
57442+static void gr_log_middle_varargs(int audit, const char *msg, ...)
57443+ __attribute__ ((format (printf, 2, 3)));
57444+
57445+static void gr_log_middle_varargs(int audit, const char *msg, ...)
57446+{
57447+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57448+ unsigned int len = strlen(buf);
57449+ va_list ap;
57450+
57451+ va_start(ap, msg);
57452+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57453+ va_end(ap);
57454+
57455+ return;
57456+}
57457+
57458+static void gr_log_end(int audit, int append_default)
57459+{
57460+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57461+
57462+ if (append_default) {
57463+ unsigned int len = strlen(buf);
57464+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
57465+ }
57466+
57467+ printk("%s\n", buf);
57468+
57469+ return;
57470+}
57471+
57472+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
57473+{
57474+ int logtype;
57475+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
57476+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
57477+ void *voidptr = NULL;
57478+ int num1 = 0, num2 = 0;
57479+ unsigned long ulong1 = 0, ulong2 = 0;
57480+ struct dentry *dentry = NULL;
57481+ struct vfsmount *mnt = NULL;
57482+ struct file *file = NULL;
57483+ struct task_struct *task = NULL;
57484+ const struct cred *cred, *pcred;
57485+ va_list ap;
57486+
57487+ BEGIN_LOCKS(audit);
57488+ logtype = gr_log_start(audit);
57489+ if (logtype == FLOODING) {
57490+ END_LOCKS(audit);
57491+ return;
57492+ }
57493+ va_start(ap, argtypes);
57494+ switch (argtypes) {
57495+ case GR_TTYSNIFF:
57496+ task = va_arg(ap, struct task_struct *);
57497+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
57498+ break;
57499+ case GR_SYSCTL_HIDDEN:
57500+ str1 = va_arg(ap, char *);
57501+ gr_log_middle_varargs(audit, msg, result, str1);
57502+ break;
57503+ case GR_RBAC:
57504+ dentry = va_arg(ap, struct dentry *);
57505+ mnt = va_arg(ap, struct vfsmount *);
57506+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
57507+ break;
57508+ case GR_RBAC_STR:
57509+ dentry = va_arg(ap, struct dentry *);
57510+ mnt = va_arg(ap, struct vfsmount *);
57511+ str1 = va_arg(ap, char *);
57512+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
57513+ break;
57514+ case GR_STR_RBAC:
57515+ str1 = va_arg(ap, char *);
57516+ dentry = va_arg(ap, struct dentry *);
57517+ mnt = va_arg(ap, struct vfsmount *);
57518+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
57519+ break;
57520+ case GR_RBAC_MODE2:
57521+ dentry = va_arg(ap, struct dentry *);
57522+ mnt = va_arg(ap, struct vfsmount *);
57523+ str1 = va_arg(ap, char *);
57524+ str2 = va_arg(ap, char *);
57525+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
57526+ break;
57527+ case GR_RBAC_MODE3:
57528+ dentry = va_arg(ap, struct dentry *);
57529+ mnt = va_arg(ap, struct vfsmount *);
57530+ str1 = va_arg(ap, char *);
57531+ str2 = va_arg(ap, char *);
57532+ str3 = va_arg(ap, char *);
57533+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
57534+ break;
57535+ case GR_FILENAME:
57536+ dentry = va_arg(ap, struct dentry *);
57537+ mnt = va_arg(ap, struct vfsmount *);
57538+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
57539+ break;
57540+ case GR_STR_FILENAME:
57541+ str1 = va_arg(ap, char *);
57542+ dentry = va_arg(ap, struct dentry *);
57543+ mnt = va_arg(ap, struct vfsmount *);
57544+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
57545+ break;
57546+ case GR_FILENAME_STR:
57547+ dentry = va_arg(ap, struct dentry *);
57548+ mnt = va_arg(ap, struct vfsmount *);
57549+ str1 = va_arg(ap, char *);
57550+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
57551+ break;
57552+ case GR_FILENAME_TWO_INT:
57553+ dentry = va_arg(ap, struct dentry *);
57554+ mnt = va_arg(ap, struct vfsmount *);
57555+ num1 = va_arg(ap, int);
57556+ num2 = va_arg(ap, int);
57557+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
57558+ break;
57559+ case GR_FILENAME_TWO_INT_STR:
57560+ dentry = va_arg(ap, struct dentry *);
57561+ mnt = va_arg(ap, struct vfsmount *);
57562+ num1 = va_arg(ap, int);
57563+ num2 = va_arg(ap, int);
57564+ str1 = va_arg(ap, char *);
57565+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
57566+ break;
57567+ case GR_TEXTREL:
57568+ file = va_arg(ap, struct file *);
57569+ ulong1 = va_arg(ap, unsigned long);
57570+ ulong2 = va_arg(ap, unsigned long);
57571+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
57572+ break;
57573+ case GR_PTRACE:
57574+ task = va_arg(ap, struct task_struct *);
57575+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
57576+ break;
57577+ case GR_RESOURCE:
57578+ task = va_arg(ap, struct task_struct *);
57579+ cred = __task_cred(task);
57580+ pcred = __task_cred(task->real_parent);
57581+ ulong1 = va_arg(ap, unsigned long);
57582+ str1 = va_arg(ap, char *);
57583+ ulong2 = va_arg(ap, unsigned long);
57584+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57585+ break;
57586+ case GR_CAP:
57587+ task = va_arg(ap, struct task_struct *);
57588+ cred = __task_cred(task);
57589+ pcred = __task_cred(task->real_parent);
57590+ str1 = va_arg(ap, char *);
57591+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57592+ break;
57593+ case GR_SIG:
57594+ str1 = va_arg(ap, char *);
57595+ voidptr = va_arg(ap, void *);
57596+ gr_log_middle_varargs(audit, msg, str1, voidptr);
57597+ break;
57598+ case GR_SIG2:
57599+ task = va_arg(ap, struct task_struct *);
57600+ cred = __task_cred(task);
57601+ pcred = __task_cred(task->real_parent);
57602+ num1 = va_arg(ap, int);
57603+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57604+ break;
57605+ case GR_CRASH1:
57606+ task = va_arg(ap, struct task_struct *);
57607+ cred = __task_cred(task);
57608+ pcred = __task_cred(task->real_parent);
57609+ ulong1 = va_arg(ap, unsigned long);
57610+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
57611+ break;
57612+ case GR_CRASH2:
57613+ task = va_arg(ap, struct task_struct *);
57614+ cred = __task_cred(task);
57615+ pcred = __task_cred(task->real_parent);
57616+ ulong1 = va_arg(ap, unsigned long);
57617+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
57618+ break;
57619+ case GR_RWXMAP:
57620+ file = va_arg(ap, struct file *);
57621+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
57622+ break;
57623+ case GR_PSACCT:
57624+ {
57625+ unsigned int wday, cday;
57626+ __u8 whr, chr;
57627+ __u8 wmin, cmin;
57628+ __u8 wsec, csec;
57629+ char cur_tty[64] = { 0 };
57630+ char parent_tty[64] = { 0 };
57631+
57632+ task = va_arg(ap, struct task_struct *);
57633+ wday = va_arg(ap, unsigned int);
57634+ cday = va_arg(ap, unsigned int);
57635+ whr = va_arg(ap, int);
57636+ chr = va_arg(ap, int);
57637+ wmin = va_arg(ap, int);
57638+ cmin = va_arg(ap, int);
57639+ wsec = va_arg(ap, int);
57640+ csec = va_arg(ap, int);
57641+ ulong1 = va_arg(ap, unsigned long);
57642+ cred = __task_cred(task);
57643+ pcred = __task_cred(task->real_parent);
57644+
57645+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57646+ }
57647+ break;
57648+ default:
57649+ gr_log_middle(audit, msg, ap);
57650+ }
57651+ va_end(ap);
57652+ // these don't need DEFAULTSECARGS printed on the end
57653+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
57654+ gr_log_end(audit, 0);
57655+ else
57656+ gr_log_end(audit, 1);
57657+ END_LOCKS(audit);
57658+}
57659diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
57660new file mode 100644
57661index 0000000..f536303
57662--- /dev/null
57663+++ b/grsecurity/grsec_mem.c
57664@@ -0,0 +1,40 @@
57665+#include <linux/kernel.h>
57666+#include <linux/sched.h>
57667+#include <linux/mm.h>
57668+#include <linux/mman.h>
57669+#include <linux/grinternal.h>
57670+
57671+void
57672+gr_handle_ioperm(void)
57673+{
57674+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
57675+ return;
57676+}
57677+
57678+void
57679+gr_handle_iopl(void)
57680+{
57681+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
57682+ return;
57683+}
57684+
57685+void
57686+gr_handle_mem_readwrite(u64 from, u64 to)
57687+{
57688+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
57689+ return;
57690+}
57691+
57692+void
57693+gr_handle_vm86(void)
57694+{
57695+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
57696+ return;
57697+}
57698+
57699+void
57700+gr_log_badprocpid(const char *entry)
57701+{
57702+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
57703+ return;
57704+}
57705diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
57706new file mode 100644
57707index 0000000..2131422
57708--- /dev/null
57709+++ b/grsecurity/grsec_mount.c
57710@@ -0,0 +1,62 @@
57711+#include <linux/kernel.h>
57712+#include <linux/sched.h>
57713+#include <linux/mount.h>
57714+#include <linux/grsecurity.h>
57715+#include <linux/grinternal.h>
57716+
57717+void
57718+gr_log_remount(const char *devname, const int retval)
57719+{
57720+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57721+ if (grsec_enable_mount && (retval >= 0))
57722+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
57723+#endif
57724+ return;
57725+}
57726+
57727+void
57728+gr_log_unmount(const char *devname, const int retval)
57729+{
57730+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57731+ if (grsec_enable_mount && (retval >= 0))
57732+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
57733+#endif
57734+ return;
57735+}
57736+
57737+void
57738+gr_log_mount(const char *from, const char *to, const int retval)
57739+{
57740+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57741+ if (grsec_enable_mount && (retval >= 0))
57742+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
57743+#endif
57744+ return;
57745+}
57746+
57747+int
57748+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
57749+{
57750+#ifdef CONFIG_GRKERNSEC_ROFS
57751+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
57752+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
57753+ return -EPERM;
57754+ } else
57755+ return 0;
57756+#endif
57757+ return 0;
57758+}
57759+
57760+int
57761+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
57762+{
57763+#ifdef CONFIG_GRKERNSEC_ROFS
57764+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
57765+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
57766+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
57767+ return -EPERM;
57768+ } else
57769+ return 0;
57770+#endif
57771+ return 0;
57772+}
57773diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
57774new file mode 100644
57775index 0000000..a3b12a0
57776--- /dev/null
57777+++ b/grsecurity/grsec_pax.c
57778@@ -0,0 +1,36 @@
57779+#include <linux/kernel.h>
57780+#include <linux/sched.h>
57781+#include <linux/mm.h>
57782+#include <linux/file.h>
57783+#include <linux/grinternal.h>
57784+#include <linux/grsecurity.h>
57785+
57786+void
57787+gr_log_textrel(struct vm_area_struct * vma)
57788+{
57789+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57790+ if (grsec_enable_audit_textrel)
57791+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
57792+#endif
57793+ return;
57794+}
57795+
57796+void
57797+gr_log_rwxmmap(struct file *file)
57798+{
57799+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57800+ if (grsec_enable_log_rwxmaps)
57801+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
57802+#endif
57803+ return;
57804+}
57805+
57806+void
57807+gr_log_rwxmprotect(struct file *file)
57808+{
57809+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57810+ if (grsec_enable_log_rwxmaps)
57811+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
57812+#endif
57813+ return;
57814+}
57815diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
57816new file mode 100644
57817index 0000000..f7f29aa
57818--- /dev/null
57819+++ b/grsecurity/grsec_ptrace.c
57820@@ -0,0 +1,30 @@
57821+#include <linux/kernel.h>
57822+#include <linux/sched.h>
57823+#include <linux/grinternal.h>
57824+#include <linux/security.h>
57825+
57826+void
57827+gr_audit_ptrace(struct task_struct *task)
57828+{
57829+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57830+ if (grsec_enable_audit_ptrace)
57831+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
57832+#endif
57833+ return;
57834+}
57835+
57836+int
57837+gr_ptrace_readexec(struct file *file, int unsafe_flags)
57838+{
57839+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57840+ const struct dentry *dentry = file->f_path.dentry;
57841+ const struct vfsmount *mnt = file->f_path.mnt;
57842+
57843+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
57844+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
57845+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
57846+ return -EACCES;
57847+ }
57848+#endif
57849+ return 0;
57850+}
57851diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
57852new file mode 100644
57853index 0000000..7a5b2de
57854--- /dev/null
57855+++ b/grsecurity/grsec_sig.c
57856@@ -0,0 +1,207 @@
57857+#include <linux/kernel.h>
57858+#include <linux/sched.h>
57859+#include <linux/delay.h>
57860+#include <linux/grsecurity.h>
57861+#include <linux/grinternal.h>
57862+#include <linux/hardirq.h>
57863+
57864+char *signames[] = {
57865+ [SIGSEGV] = "Segmentation fault",
57866+ [SIGILL] = "Illegal instruction",
57867+ [SIGABRT] = "Abort",
57868+ [SIGBUS] = "Invalid alignment/Bus error"
57869+};
57870+
57871+void
57872+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
57873+{
57874+#ifdef CONFIG_GRKERNSEC_SIGNAL
57875+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
57876+ (sig == SIGABRT) || (sig == SIGBUS))) {
57877+ if (t->pid == current->pid) {
57878+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
57879+ } else {
57880+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
57881+ }
57882+ }
57883+#endif
57884+ return;
57885+}
57886+
57887+int
57888+gr_handle_signal(const struct task_struct *p, const int sig)
57889+{
57890+#ifdef CONFIG_GRKERNSEC
57891+ /* ignore the 0 signal for protected task checks */
57892+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
57893+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
57894+ return -EPERM;
57895+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
57896+ return -EPERM;
57897+ }
57898+#endif
57899+ return 0;
57900+}
57901+
57902+#ifdef CONFIG_GRKERNSEC
57903+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
57904+
57905+int gr_fake_force_sig(int sig, struct task_struct *t)
57906+{
57907+ unsigned long int flags;
57908+ int ret, blocked, ignored;
57909+ struct k_sigaction *action;
57910+
57911+ spin_lock_irqsave(&t->sighand->siglock, flags);
57912+ action = &t->sighand->action[sig-1];
57913+ ignored = action->sa.sa_handler == SIG_IGN;
57914+ blocked = sigismember(&t->blocked, sig);
57915+ if (blocked || ignored) {
57916+ action->sa.sa_handler = SIG_DFL;
57917+ if (blocked) {
57918+ sigdelset(&t->blocked, sig);
57919+ recalc_sigpending_and_wake(t);
57920+ }
57921+ }
57922+ if (action->sa.sa_handler == SIG_DFL)
57923+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
57924+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
57925+
57926+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
57927+
57928+ return ret;
57929+}
57930+#endif
57931+
57932+#ifdef CONFIG_GRKERNSEC_BRUTE
57933+#define GR_USER_BAN_TIME (15 * 60)
57934+
57935+static int __get_dumpable(unsigned long mm_flags)
57936+{
57937+ int ret;
57938+
57939+ ret = mm_flags & MMF_DUMPABLE_MASK;
57940+ return (ret >= 2) ? 2 : ret;
57941+}
57942+#endif
57943+
57944+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
57945+{
57946+#ifdef CONFIG_GRKERNSEC_BRUTE
57947+ uid_t uid = 0;
57948+
57949+ if (!grsec_enable_brute)
57950+ return;
57951+
57952+ rcu_read_lock();
57953+ read_lock(&tasklist_lock);
57954+ read_lock(&grsec_exec_file_lock);
57955+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
57956+ p->real_parent->brute = 1;
57957+ else {
57958+ const struct cred *cred = __task_cred(p), *cred2;
57959+ struct task_struct *tsk, *tsk2;
57960+
57961+ if (!__get_dumpable(mm_flags) && cred->uid) {
57962+ struct user_struct *user;
57963+
57964+ uid = cred->uid;
57965+
57966+ /* this is put upon execution past expiration */
57967+ user = find_user(uid);
57968+ if (user == NULL)
57969+ goto unlock;
57970+ user->banned = 1;
57971+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
57972+ if (user->ban_expires == ~0UL)
57973+ user->ban_expires--;
57974+
57975+ do_each_thread(tsk2, tsk) {
57976+ cred2 = __task_cred(tsk);
57977+ if (tsk != p && cred2->uid == uid)
57978+ gr_fake_force_sig(SIGKILL, tsk);
57979+ } while_each_thread(tsk2, tsk);
57980+ }
57981+ }
57982+unlock:
57983+ read_unlock(&grsec_exec_file_lock);
57984+ read_unlock(&tasklist_lock);
57985+ rcu_read_unlock();
57986+
57987+ if (uid)
57988+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
57989+
57990+#endif
57991+ return;
57992+}
57993+
57994+void gr_handle_brute_check(void)
57995+{
57996+#ifdef CONFIG_GRKERNSEC_BRUTE
57997+ if (current->brute)
57998+ msleep(30 * 1000);
57999+#endif
58000+ return;
58001+}
58002+
58003+void gr_handle_kernel_exploit(void)
58004+{
58005+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
58006+ const struct cred *cred;
58007+ struct task_struct *tsk, *tsk2;
58008+ struct user_struct *user;
58009+ uid_t uid;
58010+
58011+ if (in_irq() || in_serving_softirq() || in_nmi())
58012+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
58013+
58014+ uid = current_uid();
58015+
58016+ if (uid == 0)
58017+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
58018+ else {
58019+ /* kill all the processes of this user, hold a reference
58020+ to their creds struct, and prevent them from creating
58021+ another process until system reset
58022+ */
58023+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
58024+ /* we intentionally leak this ref */
58025+ user = get_uid(current->cred->user);
58026+ if (user) {
58027+ user->banned = 1;
58028+ user->ban_expires = ~0UL;
58029+ }
58030+
58031+ read_lock(&tasklist_lock);
58032+ do_each_thread(tsk2, tsk) {
58033+ cred = __task_cred(tsk);
58034+ if (cred->uid == uid)
58035+ gr_fake_force_sig(SIGKILL, tsk);
58036+ } while_each_thread(tsk2, tsk);
58037+ read_unlock(&tasklist_lock);
58038+ }
58039+#endif
58040+}
58041+
58042+int __gr_process_user_ban(struct user_struct *user)
58043+{
58044+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58045+ if (unlikely(user->banned)) {
58046+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
58047+ user->banned = 0;
58048+ user->ban_expires = 0;
58049+ free_uid(user);
58050+ } else
58051+ return -EPERM;
58052+ }
58053+#endif
58054+ return 0;
58055+}
58056+
58057+int gr_process_user_ban(void)
58058+{
58059+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58060+ return __gr_process_user_ban(current->cred->user);
58061+#endif
58062+ return 0;
58063+}
58064diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
58065new file mode 100644
58066index 0000000..4030d57
58067--- /dev/null
58068+++ b/grsecurity/grsec_sock.c
58069@@ -0,0 +1,244 @@
58070+#include <linux/kernel.h>
58071+#include <linux/module.h>
58072+#include <linux/sched.h>
58073+#include <linux/file.h>
58074+#include <linux/net.h>
58075+#include <linux/in.h>
58076+#include <linux/ip.h>
58077+#include <net/sock.h>
58078+#include <net/inet_sock.h>
58079+#include <linux/grsecurity.h>
58080+#include <linux/grinternal.h>
58081+#include <linux/gracl.h>
58082+
58083+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
58084+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
58085+
58086+EXPORT_SYMBOL(gr_search_udp_recvmsg);
58087+EXPORT_SYMBOL(gr_search_udp_sendmsg);
58088+
58089+#ifdef CONFIG_UNIX_MODULE
58090+EXPORT_SYMBOL(gr_acl_handle_unix);
58091+EXPORT_SYMBOL(gr_acl_handle_mknod);
58092+EXPORT_SYMBOL(gr_handle_chroot_unix);
58093+EXPORT_SYMBOL(gr_handle_create);
58094+#endif
58095+
58096+#ifdef CONFIG_GRKERNSEC
58097+#define gr_conn_table_size 32749
58098+struct conn_table_entry {
58099+ struct conn_table_entry *next;
58100+ struct signal_struct *sig;
58101+};
58102+
58103+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
58104+DEFINE_SPINLOCK(gr_conn_table_lock);
58105+
58106+extern const char * gr_socktype_to_name(unsigned char type);
58107+extern const char * gr_proto_to_name(unsigned char proto);
58108+extern const char * gr_sockfamily_to_name(unsigned char family);
58109+
58110+static __inline__ int
58111+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
58112+{
58113+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
58114+}
58115+
58116+static __inline__ int
58117+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
58118+ __u16 sport, __u16 dport)
58119+{
58120+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
58121+ sig->gr_sport == sport && sig->gr_dport == dport))
58122+ return 1;
58123+ else
58124+ return 0;
58125+}
58126+
58127+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
58128+{
58129+ struct conn_table_entry **match;
58130+ unsigned int index;
58131+
58132+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58133+ sig->gr_sport, sig->gr_dport,
58134+ gr_conn_table_size);
58135+
58136+ newent->sig = sig;
58137+
58138+ match = &gr_conn_table[index];
58139+ newent->next = *match;
58140+ *match = newent;
58141+
58142+ return;
58143+}
58144+
58145+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
58146+{
58147+ struct conn_table_entry *match, *last = NULL;
58148+ unsigned int index;
58149+
58150+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58151+ sig->gr_sport, sig->gr_dport,
58152+ gr_conn_table_size);
58153+
58154+ match = gr_conn_table[index];
58155+ while (match && !conn_match(match->sig,
58156+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
58157+ sig->gr_dport)) {
58158+ last = match;
58159+ match = match->next;
58160+ }
58161+
58162+ if (match) {
58163+ if (last)
58164+ last->next = match->next;
58165+ else
58166+ gr_conn_table[index] = NULL;
58167+ kfree(match);
58168+ }
58169+
58170+ return;
58171+}
58172+
58173+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
58174+ __u16 sport, __u16 dport)
58175+{
58176+ struct conn_table_entry *match;
58177+ unsigned int index;
58178+
58179+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
58180+
58181+ match = gr_conn_table[index];
58182+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
58183+ match = match->next;
58184+
58185+ if (match)
58186+ return match->sig;
58187+ else
58188+ return NULL;
58189+}
58190+
58191+#endif
58192+
58193+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
58194+{
58195+#ifdef CONFIG_GRKERNSEC
58196+ struct signal_struct *sig = task->signal;
58197+ struct conn_table_entry *newent;
58198+
58199+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
58200+ if (newent == NULL)
58201+ return;
58202+ /* no bh lock needed since we are called with bh disabled */
58203+ spin_lock(&gr_conn_table_lock);
58204+ gr_del_task_from_ip_table_nolock(sig);
58205+ sig->gr_saddr = inet->inet_rcv_saddr;
58206+ sig->gr_daddr = inet->inet_daddr;
58207+ sig->gr_sport = inet->inet_sport;
58208+ sig->gr_dport = inet->inet_dport;
58209+ gr_add_to_task_ip_table_nolock(sig, newent);
58210+ spin_unlock(&gr_conn_table_lock);
58211+#endif
58212+ return;
58213+}
58214+
58215+void gr_del_task_from_ip_table(struct task_struct *task)
58216+{
58217+#ifdef CONFIG_GRKERNSEC
58218+ spin_lock_bh(&gr_conn_table_lock);
58219+ gr_del_task_from_ip_table_nolock(task->signal);
58220+ spin_unlock_bh(&gr_conn_table_lock);
58221+#endif
58222+ return;
58223+}
58224+
58225+void
58226+gr_attach_curr_ip(const struct sock *sk)
58227+{
58228+#ifdef CONFIG_GRKERNSEC
58229+ struct signal_struct *p, *set;
58230+ const struct inet_sock *inet = inet_sk(sk);
58231+
58232+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
58233+ return;
58234+
58235+ set = current->signal;
58236+
58237+ spin_lock_bh(&gr_conn_table_lock);
58238+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
58239+ inet->inet_dport, inet->inet_sport);
58240+ if (unlikely(p != NULL)) {
58241+ set->curr_ip = p->curr_ip;
58242+ set->used_accept = 1;
58243+ gr_del_task_from_ip_table_nolock(p);
58244+ spin_unlock_bh(&gr_conn_table_lock);
58245+ return;
58246+ }
58247+ spin_unlock_bh(&gr_conn_table_lock);
58248+
58249+ set->curr_ip = inet->inet_daddr;
58250+ set->used_accept = 1;
58251+#endif
58252+ return;
58253+}
58254+
58255+int
58256+gr_handle_sock_all(const int family, const int type, const int protocol)
58257+{
58258+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58259+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
58260+ (family != AF_UNIX)) {
58261+ if (family == AF_INET)
58262+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
58263+ else
58264+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
58265+ return -EACCES;
58266+ }
58267+#endif
58268+ return 0;
58269+}
58270+
58271+int
58272+gr_handle_sock_server(const struct sockaddr *sck)
58273+{
58274+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58275+ if (grsec_enable_socket_server &&
58276+ in_group_p(grsec_socket_server_gid) &&
58277+ sck && (sck->sa_family != AF_UNIX) &&
58278+ (sck->sa_family != AF_LOCAL)) {
58279+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58280+ return -EACCES;
58281+ }
58282+#endif
58283+ return 0;
58284+}
58285+
58286+int
58287+gr_handle_sock_server_other(const struct sock *sck)
58288+{
58289+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58290+ if (grsec_enable_socket_server &&
58291+ in_group_p(grsec_socket_server_gid) &&
58292+ sck && (sck->sk_family != AF_UNIX) &&
58293+ (sck->sk_family != AF_LOCAL)) {
58294+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58295+ return -EACCES;
58296+ }
58297+#endif
58298+ return 0;
58299+}
58300+
58301+int
58302+gr_handle_sock_client(const struct sockaddr *sck)
58303+{
58304+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58305+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
58306+ sck && (sck->sa_family != AF_UNIX) &&
58307+ (sck->sa_family != AF_LOCAL)) {
58308+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
58309+ return -EACCES;
58310+ }
58311+#endif
58312+ return 0;
58313+}
58314diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
58315new file mode 100644
58316index 0000000..8316f6f
58317--- /dev/null
58318+++ b/grsecurity/grsec_sysctl.c
58319@@ -0,0 +1,453 @@
58320+#include <linux/kernel.h>
58321+#include <linux/sched.h>
58322+#include <linux/sysctl.h>
58323+#include <linux/grsecurity.h>
58324+#include <linux/grinternal.h>
58325+
58326+int
58327+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
58328+{
58329+#ifdef CONFIG_GRKERNSEC_SYSCTL
58330+ if (dirname == NULL || name == NULL)
58331+ return 0;
58332+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
58333+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
58334+ return -EACCES;
58335+ }
58336+#endif
58337+ return 0;
58338+}
58339+
58340+#ifdef CONFIG_GRKERNSEC_ROFS
58341+static int __maybe_unused one = 1;
58342+#endif
58343+
58344+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
58345+struct ctl_table grsecurity_table[] = {
58346+#ifdef CONFIG_GRKERNSEC_SYSCTL
58347+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
58348+#ifdef CONFIG_GRKERNSEC_IO
58349+ {
58350+ .procname = "disable_priv_io",
58351+ .data = &grsec_disable_privio,
58352+ .maxlen = sizeof(int),
58353+ .mode = 0600,
58354+ .proc_handler = &proc_dointvec,
58355+ },
58356+#endif
58357+#endif
58358+#ifdef CONFIG_GRKERNSEC_LINK
58359+ {
58360+ .procname = "linking_restrictions",
58361+ .data = &grsec_enable_link,
58362+ .maxlen = sizeof(int),
58363+ .mode = 0600,
58364+ .proc_handler = &proc_dointvec,
58365+ },
58366+#endif
58367+#ifdef CONFIG_GRKERNSEC_BRUTE
58368+ {
58369+ .procname = "deter_bruteforce",
58370+ .data = &grsec_enable_brute,
58371+ .maxlen = sizeof(int),
58372+ .mode = 0600,
58373+ .proc_handler = &proc_dointvec,
58374+ },
58375+#endif
58376+#ifdef CONFIG_GRKERNSEC_FIFO
58377+ {
58378+ .procname = "fifo_restrictions",
58379+ .data = &grsec_enable_fifo,
58380+ .maxlen = sizeof(int),
58381+ .mode = 0600,
58382+ .proc_handler = &proc_dointvec,
58383+ },
58384+#endif
58385+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58386+ {
58387+ .procname = "ptrace_readexec",
58388+ .data = &grsec_enable_ptrace_readexec,
58389+ .maxlen = sizeof(int),
58390+ .mode = 0600,
58391+ .proc_handler = &proc_dointvec,
58392+ },
58393+#endif
58394+#ifdef CONFIG_GRKERNSEC_SETXID
58395+ {
58396+ .procname = "consistent_setxid",
58397+ .data = &grsec_enable_setxid,
58398+ .maxlen = sizeof(int),
58399+ .mode = 0600,
58400+ .proc_handler = &proc_dointvec,
58401+ },
58402+#endif
58403+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58404+ {
58405+ .procname = "ip_blackhole",
58406+ .data = &grsec_enable_blackhole,
58407+ .maxlen = sizeof(int),
58408+ .mode = 0600,
58409+ .proc_handler = &proc_dointvec,
58410+ },
58411+ {
58412+ .procname = "lastack_retries",
58413+ .data = &grsec_lastack_retries,
58414+ .maxlen = sizeof(int),
58415+ .mode = 0600,
58416+ .proc_handler = &proc_dointvec,
58417+ },
58418+#endif
58419+#ifdef CONFIG_GRKERNSEC_EXECLOG
58420+ {
58421+ .procname = "exec_logging",
58422+ .data = &grsec_enable_execlog,
58423+ .maxlen = sizeof(int),
58424+ .mode = 0600,
58425+ .proc_handler = &proc_dointvec,
58426+ },
58427+#endif
58428+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58429+ {
58430+ .procname = "rwxmap_logging",
58431+ .data = &grsec_enable_log_rwxmaps,
58432+ .maxlen = sizeof(int),
58433+ .mode = 0600,
58434+ .proc_handler = &proc_dointvec,
58435+ },
58436+#endif
58437+#ifdef CONFIG_GRKERNSEC_SIGNAL
58438+ {
58439+ .procname = "signal_logging",
58440+ .data = &grsec_enable_signal,
58441+ .maxlen = sizeof(int),
58442+ .mode = 0600,
58443+ .proc_handler = &proc_dointvec,
58444+ },
58445+#endif
58446+#ifdef CONFIG_GRKERNSEC_FORKFAIL
58447+ {
58448+ .procname = "forkfail_logging",
58449+ .data = &grsec_enable_forkfail,
58450+ .maxlen = sizeof(int),
58451+ .mode = 0600,
58452+ .proc_handler = &proc_dointvec,
58453+ },
58454+#endif
58455+#ifdef CONFIG_GRKERNSEC_TIME
58456+ {
58457+ .procname = "timechange_logging",
58458+ .data = &grsec_enable_time,
58459+ .maxlen = sizeof(int),
58460+ .mode = 0600,
58461+ .proc_handler = &proc_dointvec,
58462+ },
58463+#endif
58464+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58465+ {
58466+ .procname = "chroot_deny_shmat",
58467+ .data = &grsec_enable_chroot_shmat,
58468+ .maxlen = sizeof(int),
58469+ .mode = 0600,
58470+ .proc_handler = &proc_dointvec,
58471+ },
58472+#endif
58473+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58474+ {
58475+ .procname = "chroot_deny_unix",
58476+ .data = &grsec_enable_chroot_unix,
58477+ .maxlen = sizeof(int),
58478+ .mode = 0600,
58479+ .proc_handler = &proc_dointvec,
58480+ },
58481+#endif
58482+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58483+ {
58484+ .procname = "chroot_deny_mount",
58485+ .data = &grsec_enable_chroot_mount,
58486+ .maxlen = sizeof(int),
58487+ .mode = 0600,
58488+ .proc_handler = &proc_dointvec,
58489+ },
58490+#endif
58491+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58492+ {
58493+ .procname = "chroot_deny_fchdir",
58494+ .data = &grsec_enable_chroot_fchdir,
58495+ .maxlen = sizeof(int),
58496+ .mode = 0600,
58497+ .proc_handler = &proc_dointvec,
58498+ },
58499+#endif
58500+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58501+ {
58502+ .procname = "chroot_deny_chroot",
58503+ .data = &grsec_enable_chroot_double,
58504+ .maxlen = sizeof(int),
58505+ .mode = 0600,
58506+ .proc_handler = &proc_dointvec,
58507+ },
58508+#endif
58509+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58510+ {
58511+ .procname = "chroot_deny_pivot",
58512+ .data = &grsec_enable_chroot_pivot,
58513+ .maxlen = sizeof(int),
58514+ .mode = 0600,
58515+ .proc_handler = &proc_dointvec,
58516+ },
58517+#endif
58518+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
58519+ {
58520+ .procname = "chroot_enforce_chdir",
58521+ .data = &grsec_enable_chroot_chdir,
58522+ .maxlen = sizeof(int),
58523+ .mode = 0600,
58524+ .proc_handler = &proc_dointvec,
58525+ },
58526+#endif
58527+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58528+ {
58529+ .procname = "chroot_deny_chmod",
58530+ .data = &grsec_enable_chroot_chmod,
58531+ .maxlen = sizeof(int),
58532+ .mode = 0600,
58533+ .proc_handler = &proc_dointvec,
58534+ },
58535+#endif
58536+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
58537+ {
58538+ .procname = "chroot_deny_mknod",
58539+ .data = &grsec_enable_chroot_mknod,
58540+ .maxlen = sizeof(int),
58541+ .mode = 0600,
58542+ .proc_handler = &proc_dointvec,
58543+ },
58544+#endif
58545+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58546+ {
58547+ .procname = "chroot_restrict_nice",
58548+ .data = &grsec_enable_chroot_nice,
58549+ .maxlen = sizeof(int),
58550+ .mode = 0600,
58551+ .proc_handler = &proc_dointvec,
58552+ },
58553+#endif
58554+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
58555+ {
58556+ .procname = "chroot_execlog",
58557+ .data = &grsec_enable_chroot_execlog,
58558+ .maxlen = sizeof(int),
58559+ .mode = 0600,
58560+ .proc_handler = &proc_dointvec,
58561+ },
58562+#endif
58563+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58564+ {
58565+ .procname = "chroot_caps",
58566+ .data = &grsec_enable_chroot_caps,
58567+ .maxlen = sizeof(int),
58568+ .mode = 0600,
58569+ .proc_handler = &proc_dointvec,
58570+ },
58571+#endif
58572+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
58573+ {
58574+ .procname = "chroot_deny_sysctl",
58575+ .data = &grsec_enable_chroot_sysctl,
58576+ .maxlen = sizeof(int),
58577+ .mode = 0600,
58578+ .proc_handler = &proc_dointvec,
58579+ },
58580+#endif
58581+#ifdef CONFIG_GRKERNSEC_TPE
58582+ {
58583+ .procname = "tpe",
58584+ .data = &grsec_enable_tpe,
58585+ .maxlen = sizeof(int),
58586+ .mode = 0600,
58587+ .proc_handler = &proc_dointvec,
58588+ },
58589+ {
58590+ .procname = "tpe_gid",
58591+ .data = &grsec_tpe_gid,
58592+ .maxlen = sizeof(int),
58593+ .mode = 0600,
58594+ .proc_handler = &proc_dointvec,
58595+ },
58596+#endif
58597+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58598+ {
58599+ .procname = "tpe_invert",
58600+ .data = &grsec_enable_tpe_invert,
58601+ .maxlen = sizeof(int),
58602+ .mode = 0600,
58603+ .proc_handler = &proc_dointvec,
58604+ },
58605+#endif
58606+#ifdef CONFIG_GRKERNSEC_TPE_ALL
58607+ {
58608+ .procname = "tpe_restrict_all",
58609+ .data = &grsec_enable_tpe_all,
58610+ .maxlen = sizeof(int),
58611+ .mode = 0600,
58612+ .proc_handler = &proc_dointvec,
58613+ },
58614+#endif
58615+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58616+ {
58617+ .procname = "socket_all",
58618+ .data = &grsec_enable_socket_all,
58619+ .maxlen = sizeof(int),
58620+ .mode = 0600,
58621+ .proc_handler = &proc_dointvec,
58622+ },
58623+ {
58624+ .procname = "socket_all_gid",
58625+ .data = &grsec_socket_all_gid,
58626+ .maxlen = sizeof(int),
58627+ .mode = 0600,
58628+ .proc_handler = &proc_dointvec,
58629+ },
58630+#endif
58631+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58632+ {
58633+ .procname = "socket_client",
58634+ .data = &grsec_enable_socket_client,
58635+ .maxlen = sizeof(int),
58636+ .mode = 0600,
58637+ .proc_handler = &proc_dointvec,
58638+ },
58639+ {
58640+ .procname = "socket_client_gid",
58641+ .data = &grsec_socket_client_gid,
58642+ .maxlen = sizeof(int),
58643+ .mode = 0600,
58644+ .proc_handler = &proc_dointvec,
58645+ },
58646+#endif
58647+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58648+ {
58649+ .procname = "socket_server",
58650+ .data = &grsec_enable_socket_server,
58651+ .maxlen = sizeof(int),
58652+ .mode = 0600,
58653+ .proc_handler = &proc_dointvec,
58654+ },
58655+ {
58656+ .procname = "socket_server_gid",
58657+ .data = &grsec_socket_server_gid,
58658+ .maxlen = sizeof(int),
58659+ .mode = 0600,
58660+ .proc_handler = &proc_dointvec,
58661+ },
58662+#endif
58663+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
58664+ {
58665+ .procname = "audit_group",
58666+ .data = &grsec_enable_group,
58667+ .maxlen = sizeof(int),
58668+ .mode = 0600,
58669+ .proc_handler = &proc_dointvec,
58670+ },
58671+ {
58672+ .procname = "audit_gid",
58673+ .data = &grsec_audit_gid,
58674+ .maxlen = sizeof(int),
58675+ .mode = 0600,
58676+ .proc_handler = &proc_dointvec,
58677+ },
58678+#endif
58679+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
58680+ {
58681+ .procname = "audit_chdir",
58682+ .data = &grsec_enable_chdir,
58683+ .maxlen = sizeof(int),
58684+ .mode = 0600,
58685+ .proc_handler = &proc_dointvec,
58686+ },
58687+#endif
58688+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58689+ {
58690+ .procname = "audit_mount",
58691+ .data = &grsec_enable_mount,
58692+ .maxlen = sizeof(int),
58693+ .mode = 0600,
58694+ .proc_handler = &proc_dointvec,
58695+ },
58696+#endif
58697+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58698+ {
58699+ .procname = "audit_textrel",
58700+ .data = &grsec_enable_audit_textrel,
58701+ .maxlen = sizeof(int),
58702+ .mode = 0600,
58703+ .proc_handler = &proc_dointvec,
58704+ },
58705+#endif
58706+#ifdef CONFIG_GRKERNSEC_DMESG
58707+ {
58708+ .procname = "dmesg",
58709+ .data = &grsec_enable_dmesg,
58710+ .maxlen = sizeof(int),
58711+ .mode = 0600,
58712+ .proc_handler = &proc_dointvec,
58713+ },
58714+#endif
58715+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58716+ {
58717+ .procname = "chroot_findtask",
58718+ .data = &grsec_enable_chroot_findtask,
58719+ .maxlen = sizeof(int),
58720+ .mode = 0600,
58721+ .proc_handler = &proc_dointvec,
58722+ },
58723+#endif
58724+#ifdef CONFIG_GRKERNSEC_RESLOG
58725+ {
58726+ .procname = "resource_logging",
58727+ .data = &grsec_resource_logging,
58728+ .maxlen = sizeof(int),
58729+ .mode = 0600,
58730+ .proc_handler = &proc_dointvec,
58731+ },
58732+#endif
58733+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58734+ {
58735+ .procname = "audit_ptrace",
58736+ .data = &grsec_enable_audit_ptrace,
58737+ .maxlen = sizeof(int),
58738+ .mode = 0600,
58739+ .proc_handler = &proc_dointvec,
58740+ },
58741+#endif
58742+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
58743+ {
58744+ .procname = "harden_ptrace",
58745+ .data = &grsec_enable_harden_ptrace,
58746+ .maxlen = sizeof(int),
58747+ .mode = 0600,
58748+ .proc_handler = &proc_dointvec,
58749+ },
58750+#endif
58751+ {
58752+ .procname = "grsec_lock",
58753+ .data = &grsec_lock,
58754+ .maxlen = sizeof(int),
58755+ .mode = 0600,
58756+ .proc_handler = &proc_dointvec,
58757+ },
58758+#endif
58759+#ifdef CONFIG_GRKERNSEC_ROFS
58760+ {
58761+ .procname = "romount_protect",
58762+ .data = &grsec_enable_rofs,
58763+ .maxlen = sizeof(int),
58764+ .mode = 0600,
58765+ .proc_handler = &proc_dointvec_minmax,
58766+ .extra1 = &one,
58767+ .extra2 = &one,
58768+ },
58769+#endif
58770+ { }
58771+};
58772+#endif
58773diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
58774new file mode 100644
58775index 0000000..0dc13c3
58776--- /dev/null
58777+++ b/grsecurity/grsec_time.c
58778@@ -0,0 +1,16 @@
58779+#include <linux/kernel.h>
58780+#include <linux/sched.h>
58781+#include <linux/grinternal.h>
58782+#include <linux/module.h>
58783+
58784+void
58785+gr_log_timechange(void)
58786+{
58787+#ifdef CONFIG_GRKERNSEC_TIME
58788+ if (grsec_enable_time)
58789+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
58790+#endif
58791+ return;
58792+}
58793+
58794+EXPORT_SYMBOL(gr_log_timechange);
58795diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
58796new file mode 100644
58797index 0000000..07e0dc0
58798--- /dev/null
58799+++ b/grsecurity/grsec_tpe.c
58800@@ -0,0 +1,73 @@
58801+#include <linux/kernel.h>
58802+#include <linux/sched.h>
58803+#include <linux/file.h>
58804+#include <linux/fs.h>
58805+#include <linux/grinternal.h>
58806+
58807+extern int gr_acl_tpe_check(void);
58808+
58809+int
58810+gr_tpe_allow(const struct file *file)
58811+{
58812+#ifdef CONFIG_GRKERNSEC
58813+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
58814+ const struct cred *cred = current_cred();
58815+ char *msg = NULL;
58816+ char *msg2 = NULL;
58817+
58818+ // never restrict root
58819+ if (!cred->uid)
58820+ return 1;
58821+
58822+ if (grsec_enable_tpe) {
58823+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58824+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
58825+ msg = "not being in trusted group";
58826+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
58827+ msg = "being in untrusted group";
58828+#else
58829+ if (in_group_p(grsec_tpe_gid))
58830+ msg = "being in untrusted group";
58831+#endif
58832+ }
58833+ if (!msg && gr_acl_tpe_check())
58834+ msg = "being in untrusted role";
58835+
58836+ // not in any affected group/role
58837+ if (!msg)
58838+ goto next_check;
58839+
58840+ if (inode->i_uid)
58841+ msg2 = "file in non-root-owned directory";
58842+ else if (inode->i_mode & S_IWOTH)
58843+ msg2 = "file in world-writable directory";
58844+ else if (inode->i_mode & S_IWGRP)
58845+ msg2 = "file in group-writable directory";
58846+
58847+ if (msg && msg2) {
58848+ char fullmsg[70] = {0};
58849+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
58850+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
58851+ return 0;
58852+ }
58853+ msg = NULL;
58854+next_check:
58855+#ifdef CONFIG_GRKERNSEC_TPE_ALL
58856+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
58857+ return 1;
58858+
58859+ if (inode->i_uid && (inode->i_uid != cred->uid))
58860+ msg = "directory not owned by user";
58861+ else if (inode->i_mode & S_IWOTH)
58862+ msg = "file in world-writable directory";
58863+ else if (inode->i_mode & S_IWGRP)
58864+ msg = "file in group-writable directory";
58865+
58866+ if (msg) {
58867+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
58868+ return 0;
58869+ }
58870+#endif
58871+#endif
58872+ return 1;
58873+}
58874diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
58875new file mode 100644
58876index 0000000..9f7b1ac
58877--- /dev/null
58878+++ b/grsecurity/grsum.c
58879@@ -0,0 +1,61 @@
58880+#include <linux/err.h>
58881+#include <linux/kernel.h>
58882+#include <linux/sched.h>
58883+#include <linux/mm.h>
58884+#include <linux/scatterlist.h>
58885+#include <linux/crypto.h>
58886+#include <linux/gracl.h>
58887+
58888+
58889+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
58890+#error "crypto and sha256 must be built into the kernel"
58891+#endif
58892+
58893+int
58894+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
58895+{
58896+ char *p;
58897+ struct crypto_hash *tfm;
58898+ struct hash_desc desc;
58899+ struct scatterlist sg;
58900+ unsigned char temp_sum[GR_SHA_LEN];
58901+ volatile int retval = 0;
58902+ volatile int dummy = 0;
58903+ unsigned int i;
58904+
58905+ sg_init_table(&sg, 1);
58906+
58907+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
58908+ if (IS_ERR(tfm)) {
58909+ /* should never happen, since sha256 should be built in */
58910+ return 1;
58911+ }
58912+
58913+ desc.tfm = tfm;
58914+ desc.flags = 0;
58915+
58916+ crypto_hash_init(&desc);
58917+
58918+ p = salt;
58919+ sg_set_buf(&sg, p, GR_SALT_LEN);
58920+ crypto_hash_update(&desc, &sg, sg.length);
58921+
58922+ p = entry->pw;
58923+ sg_set_buf(&sg, p, strlen(p));
58924+
58925+ crypto_hash_update(&desc, &sg, sg.length);
58926+
58927+ crypto_hash_final(&desc, temp_sum);
58928+
58929+ memset(entry->pw, 0, GR_PW_LEN);
58930+
58931+ for (i = 0; i < GR_SHA_LEN; i++)
58932+ if (sum[i] != temp_sum[i])
58933+ retval = 1;
58934+ else
58935+ dummy = 1; // waste a cycle
58936+
58937+ crypto_free_hash(tfm);
58938+
58939+ return retval;
58940+}
58941diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
58942index f1c8ca6..b5c1cc7 100644
58943--- a/include/acpi/acpi_bus.h
58944+++ b/include/acpi/acpi_bus.h
58945@@ -107,7 +107,7 @@ struct acpi_device_ops {
58946 acpi_op_bind bind;
58947 acpi_op_unbind unbind;
58948 acpi_op_notify notify;
58949-};
58950+} __no_const;
58951
58952 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
58953
58954diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
58955index b7babf0..71e4e74 100644
58956--- a/include/asm-generic/atomic-long.h
58957+++ b/include/asm-generic/atomic-long.h
58958@@ -22,6 +22,12 @@
58959
58960 typedef atomic64_t atomic_long_t;
58961
58962+#ifdef CONFIG_PAX_REFCOUNT
58963+typedef atomic64_unchecked_t atomic_long_unchecked_t;
58964+#else
58965+typedef atomic64_t atomic_long_unchecked_t;
58966+#endif
58967+
58968 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
58969
58970 static inline long atomic_long_read(atomic_long_t *l)
58971@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
58972 return (long)atomic64_read(v);
58973 }
58974
58975+#ifdef CONFIG_PAX_REFCOUNT
58976+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
58977+{
58978+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58979+
58980+ return (long)atomic64_read_unchecked(v);
58981+}
58982+#endif
58983+
58984 static inline void atomic_long_set(atomic_long_t *l, long i)
58985 {
58986 atomic64_t *v = (atomic64_t *)l;
58987@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
58988 atomic64_set(v, i);
58989 }
58990
58991+#ifdef CONFIG_PAX_REFCOUNT
58992+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
58993+{
58994+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58995+
58996+ atomic64_set_unchecked(v, i);
58997+}
58998+#endif
58999+
59000 static inline void atomic_long_inc(atomic_long_t *l)
59001 {
59002 atomic64_t *v = (atomic64_t *)l;
59003@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59004 atomic64_inc(v);
59005 }
59006
59007+#ifdef CONFIG_PAX_REFCOUNT
59008+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59009+{
59010+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59011+
59012+ atomic64_inc_unchecked(v);
59013+}
59014+#endif
59015+
59016 static inline void atomic_long_dec(atomic_long_t *l)
59017 {
59018 atomic64_t *v = (atomic64_t *)l;
59019@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59020 atomic64_dec(v);
59021 }
59022
59023+#ifdef CONFIG_PAX_REFCOUNT
59024+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59025+{
59026+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59027+
59028+ atomic64_dec_unchecked(v);
59029+}
59030+#endif
59031+
59032 static inline void atomic_long_add(long i, atomic_long_t *l)
59033 {
59034 atomic64_t *v = (atomic64_t *)l;
59035@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59036 atomic64_add(i, v);
59037 }
59038
59039+#ifdef CONFIG_PAX_REFCOUNT
59040+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59041+{
59042+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59043+
59044+ atomic64_add_unchecked(i, v);
59045+}
59046+#endif
59047+
59048 static inline void atomic_long_sub(long i, atomic_long_t *l)
59049 {
59050 atomic64_t *v = (atomic64_t *)l;
59051@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59052 atomic64_sub(i, v);
59053 }
59054
59055+#ifdef CONFIG_PAX_REFCOUNT
59056+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59057+{
59058+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59059+
59060+ atomic64_sub_unchecked(i, v);
59061+}
59062+#endif
59063+
59064 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59065 {
59066 atomic64_t *v = (atomic64_t *)l;
59067@@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59068 return (long)atomic64_inc_return(v);
59069 }
59070
59071+#ifdef CONFIG_PAX_REFCOUNT
59072+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59073+{
59074+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59075+
59076+ return (long)atomic64_inc_return_unchecked(v);
59077+}
59078+#endif
59079+
59080 static inline long atomic_long_dec_return(atomic_long_t *l)
59081 {
59082 atomic64_t *v = (atomic64_t *)l;
59083@@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59084
59085 typedef atomic_t atomic_long_t;
59086
59087+#ifdef CONFIG_PAX_REFCOUNT
59088+typedef atomic_unchecked_t atomic_long_unchecked_t;
59089+#else
59090+typedef atomic_t atomic_long_unchecked_t;
59091+#endif
59092+
59093 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
59094 static inline long atomic_long_read(atomic_long_t *l)
59095 {
59096@@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59097 return (long)atomic_read(v);
59098 }
59099
59100+#ifdef CONFIG_PAX_REFCOUNT
59101+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59102+{
59103+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59104+
59105+ return (long)atomic_read_unchecked(v);
59106+}
59107+#endif
59108+
59109 static inline void atomic_long_set(atomic_long_t *l, long i)
59110 {
59111 atomic_t *v = (atomic_t *)l;
59112@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59113 atomic_set(v, i);
59114 }
59115
59116+#ifdef CONFIG_PAX_REFCOUNT
59117+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59118+{
59119+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59120+
59121+ atomic_set_unchecked(v, i);
59122+}
59123+#endif
59124+
59125 static inline void atomic_long_inc(atomic_long_t *l)
59126 {
59127 atomic_t *v = (atomic_t *)l;
59128@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59129 atomic_inc(v);
59130 }
59131
59132+#ifdef CONFIG_PAX_REFCOUNT
59133+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59134+{
59135+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59136+
59137+ atomic_inc_unchecked(v);
59138+}
59139+#endif
59140+
59141 static inline void atomic_long_dec(atomic_long_t *l)
59142 {
59143 atomic_t *v = (atomic_t *)l;
59144@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59145 atomic_dec(v);
59146 }
59147
59148+#ifdef CONFIG_PAX_REFCOUNT
59149+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59150+{
59151+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59152+
59153+ atomic_dec_unchecked(v);
59154+}
59155+#endif
59156+
59157 static inline void atomic_long_add(long i, atomic_long_t *l)
59158 {
59159 atomic_t *v = (atomic_t *)l;
59160@@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59161 atomic_add(i, v);
59162 }
59163
59164+#ifdef CONFIG_PAX_REFCOUNT
59165+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59166+{
59167+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59168+
59169+ atomic_add_unchecked(i, v);
59170+}
59171+#endif
59172+
59173 static inline void atomic_long_sub(long i, atomic_long_t *l)
59174 {
59175 atomic_t *v = (atomic_t *)l;
59176@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59177 atomic_sub(i, v);
59178 }
59179
59180+#ifdef CONFIG_PAX_REFCOUNT
59181+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59182+{
59183+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59184+
59185+ atomic_sub_unchecked(i, v);
59186+}
59187+#endif
59188+
59189 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59190 {
59191 atomic_t *v = (atomic_t *)l;
59192@@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59193 return (long)atomic_inc_return(v);
59194 }
59195
59196+#ifdef CONFIG_PAX_REFCOUNT
59197+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59198+{
59199+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59200+
59201+ return (long)atomic_inc_return_unchecked(v);
59202+}
59203+#endif
59204+
59205 static inline long atomic_long_dec_return(atomic_long_t *l)
59206 {
59207 atomic_t *v = (atomic_t *)l;
59208@@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59209
59210 #endif /* BITS_PER_LONG == 64 */
59211
59212+#ifdef CONFIG_PAX_REFCOUNT
59213+static inline void pax_refcount_needs_these_functions(void)
59214+{
59215+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
59216+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
59217+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
59218+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
59219+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
59220+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
59221+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
59222+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
59223+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
59224+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
59225+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
59226+
59227+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
59228+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
59229+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
59230+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
59231+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
59232+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
59233+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
59234+}
59235+#else
59236+#define atomic_read_unchecked(v) atomic_read(v)
59237+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
59238+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
59239+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
59240+#define atomic_inc_unchecked(v) atomic_inc(v)
59241+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
59242+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
59243+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
59244+#define atomic_dec_unchecked(v) atomic_dec(v)
59245+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
59246+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
59247+
59248+#define atomic_long_read_unchecked(v) atomic_long_read(v)
59249+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
59250+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
59251+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
59252+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
59253+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
59254+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
59255+#endif
59256+
59257 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
59258diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
59259index b18ce4f..2ee2843 100644
59260--- a/include/asm-generic/atomic64.h
59261+++ b/include/asm-generic/atomic64.h
59262@@ -16,6 +16,8 @@ typedef struct {
59263 long long counter;
59264 } atomic64_t;
59265
59266+typedef atomic64_t atomic64_unchecked_t;
59267+
59268 #define ATOMIC64_INIT(i) { (i) }
59269
59270 extern long long atomic64_read(const atomic64_t *v);
59271@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
59272 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
59273 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
59274
59275+#define atomic64_read_unchecked(v) atomic64_read(v)
59276+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
59277+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
59278+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
59279+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
59280+#define atomic64_inc_unchecked(v) atomic64_inc(v)
59281+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
59282+#define atomic64_dec_unchecked(v) atomic64_dec(v)
59283+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
59284+
59285 #endif /* _ASM_GENERIC_ATOMIC64_H */
59286diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
59287index 1bfcfe5..e04c5c9 100644
59288--- a/include/asm-generic/cache.h
59289+++ b/include/asm-generic/cache.h
59290@@ -6,7 +6,7 @@
59291 * cache lines need to provide their own cache.h.
59292 */
59293
59294-#define L1_CACHE_SHIFT 5
59295-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
59296+#define L1_CACHE_SHIFT 5UL
59297+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
59298
59299 #endif /* __ASM_GENERIC_CACHE_H */
59300diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
59301index 0d68a1e..b74a761 100644
59302--- a/include/asm-generic/emergency-restart.h
59303+++ b/include/asm-generic/emergency-restart.h
59304@@ -1,7 +1,7 @@
59305 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
59306 #define _ASM_GENERIC_EMERGENCY_RESTART_H
59307
59308-static inline void machine_emergency_restart(void)
59309+static inline __noreturn void machine_emergency_restart(void)
59310 {
59311 machine_restart(NULL);
59312 }
59313diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
59314index 0232ccb..13d9165 100644
59315--- a/include/asm-generic/kmap_types.h
59316+++ b/include/asm-generic/kmap_types.h
59317@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
59318 KMAP_D(17) KM_NMI,
59319 KMAP_D(18) KM_NMI_PTE,
59320 KMAP_D(19) KM_KDB,
59321+KMAP_D(20) KM_CLEARPAGE,
59322 /*
59323 * Remember to update debug_kmap_atomic() when adding new kmap types!
59324 */
59325-KMAP_D(20) KM_TYPE_NR
59326+KMAP_D(21) KM_TYPE_NR
59327 };
59328
59329 #undef KMAP_D
59330diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
59331index 9ceb03b..2efbcbd 100644
59332--- a/include/asm-generic/local.h
59333+++ b/include/asm-generic/local.h
59334@@ -39,6 +39,7 @@ typedef struct
59335 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
59336 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
59337 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
59338+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
59339
59340 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
59341 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
59342diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
59343index 725612b..9cc513a 100644
59344--- a/include/asm-generic/pgtable-nopmd.h
59345+++ b/include/asm-generic/pgtable-nopmd.h
59346@@ -1,14 +1,19 @@
59347 #ifndef _PGTABLE_NOPMD_H
59348 #define _PGTABLE_NOPMD_H
59349
59350-#ifndef __ASSEMBLY__
59351-
59352 #include <asm-generic/pgtable-nopud.h>
59353
59354-struct mm_struct;
59355-
59356 #define __PAGETABLE_PMD_FOLDED
59357
59358+#define PMD_SHIFT PUD_SHIFT
59359+#define PTRS_PER_PMD 1
59360+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
59361+#define PMD_MASK (~(PMD_SIZE-1))
59362+
59363+#ifndef __ASSEMBLY__
59364+
59365+struct mm_struct;
59366+
59367 /*
59368 * Having the pmd type consist of a pud gets the size right, and allows
59369 * us to conceptually access the pud entry that this pmd is folded into
59370@@ -16,11 +21,6 @@ struct mm_struct;
59371 */
59372 typedef struct { pud_t pud; } pmd_t;
59373
59374-#define PMD_SHIFT PUD_SHIFT
59375-#define PTRS_PER_PMD 1
59376-#define PMD_SIZE (1UL << PMD_SHIFT)
59377-#define PMD_MASK (~(PMD_SIZE-1))
59378-
59379 /*
59380 * The "pud_xxx()" functions here are trivial for a folded two-level
59381 * setup: the pmd is never bad, and a pmd always exists (as it's folded
59382diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
59383index 810431d..0ec4804f 100644
59384--- a/include/asm-generic/pgtable-nopud.h
59385+++ b/include/asm-generic/pgtable-nopud.h
59386@@ -1,10 +1,15 @@
59387 #ifndef _PGTABLE_NOPUD_H
59388 #define _PGTABLE_NOPUD_H
59389
59390-#ifndef __ASSEMBLY__
59391-
59392 #define __PAGETABLE_PUD_FOLDED
59393
59394+#define PUD_SHIFT PGDIR_SHIFT
59395+#define PTRS_PER_PUD 1
59396+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
59397+#define PUD_MASK (~(PUD_SIZE-1))
59398+
59399+#ifndef __ASSEMBLY__
59400+
59401 /*
59402 * Having the pud type consist of a pgd gets the size right, and allows
59403 * us to conceptually access the pgd entry that this pud is folded into
59404@@ -12,11 +17,6 @@
59405 */
59406 typedef struct { pgd_t pgd; } pud_t;
59407
59408-#define PUD_SHIFT PGDIR_SHIFT
59409-#define PTRS_PER_PUD 1
59410-#define PUD_SIZE (1UL << PUD_SHIFT)
59411-#define PUD_MASK (~(PUD_SIZE-1))
59412-
59413 /*
59414 * The "pgd_xxx()" functions here are trivial for a folded two-level
59415 * setup: the pud is never bad, and a pud always exists (as it's folded
59416@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
59417 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
59418
59419 #define pgd_populate(mm, pgd, pud) do { } while (0)
59420+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
59421 /*
59422 * (puds are folded into pgds so this doesn't get actually called,
59423 * but the define is needed for a generic inline function.)
59424diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
59425index 125c54e..e95c18e 100644
59426--- a/include/asm-generic/pgtable.h
59427+++ b/include/asm-generic/pgtable.h
59428@@ -446,6 +446,18 @@ static inline int pmd_write(pmd_t pmd)
59429 #endif /* __HAVE_ARCH_PMD_WRITE */
59430 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
59431
59432+#ifndef __HAVE_ARCH_READ_PMD_ATOMIC
59433+static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
59434+{
59435+ /*
59436+ * Depend on compiler for an atomic pmd read. NOTE: this is
59437+ * only going to work, if the pmdval_t isn't larger than
59438+ * an unsigned long.
59439+ */
59440+ return *pmdp;
59441+}
59442+#endif /* __HAVE_ARCH_READ_PMD_ATOMIC */
59443+
59444 /*
59445 * This function is meant to be used by sites walking pagetables with
59446 * the mmap_sem hold in read mode to protect against MADV_DONTNEED and
59447@@ -459,11 +471,17 @@ static inline int pmd_write(pmd_t pmd)
59448 * undefined so behaving like if the pmd was none is safe (because it
59449 * can return none anyway). The compiler level barrier() is critically
59450 * important to compute the two checks atomically on the same pmdval.
59451+ *
59452+ * For 32bit kernels with a 64bit large pmd_t this automatically takes
59453+ * care of reading the pmd atomically to avoid SMP race conditions
59454+ * against pmd_populate() when the mmap_sem is hold for reading by the
59455+ * caller (a special atomic read not done by "gcc" as in the generic
59456+ * version above, is also needed when THP is disabled because the page
59457+ * fault can populate the pmd from under us).
59458 */
59459 static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
59460 {
59461- /* depend on compiler for an atomic pmd read */
59462- pmd_t pmdval = *pmd;
59463+ pmd_t pmdval = read_pmd_atomic(pmd);
59464 /*
59465 * The barrier will stabilize the pmdval in a register or on
59466 * the stack so that it will stop changing under the code.
59467@@ -503,6 +521,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
59468 #endif
59469 }
59470
59471+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
59472+static inline unsigned long pax_open_kernel(void) { return 0; }
59473+#endif
59474+
59475+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
59476+static inline unsigned long pax_close_kernel(void) { return 0; }
59477+#endif
59478+
59479 #endif /* CONFIG_MMU */
59480
59481 #endif /* !__ASSEMBLY__ */
59482diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
59483index 8aeadf6..f1dc019 100644
59484--- a/include/asm-generic/vmlinux.lds.h
59485+++ b/include/asm-generic/vmlinux.lds.h
59486@@ -218,6 +218,7 @@
59487 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
59488 VMLINUX_SYMBOL(__start_rodata) = .; \
59489 *(.rodata) *(.rodata.*) \
59490+ *(.data..read_only) \
59491 *(__vermagic) /* Kernel version magic */ \
59492 . = ALIGN(8); \
59493 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
59494@@ -716,17 +717,18 @@
59495 * section in the linker script will go there too. @phdr should have
59496 * a leading colon.
59497 *
59498- * Note that this macros defines __per_cpu_load as an absolute symbol.
59499+ * Note that this macros defines per_cpu_load as an absolute symbol.
59500 * If there is no need to put the percpu section at a predetermined
59501 * address, use PERCPU_SECTION.
59502 */
59503 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
59504- VMLINUX_SYMBOL(__per_cpu_load) = .; \
59505- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
59506+ per_cpu_load = .; \
59507+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
59508 - LOAD_OFFSET) { \
59509+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
59510 PERCPU_INPUT(cacheline) \
59511 } phdr \
59512- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
59513+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
59514
59515 /**
59516 * PERCPU_SECTION - define output section for percpu area, simple version
59517diff --git a/include/drm/drmP.h b/include/drm/drmP.h
59518index dd73104..fde86bd 100644
59519--- a/include/drm/drmP.h
59520+++ b/include/drm/drmP.h
59521@@ -72,6 +72,7 @@
59522 #include <linux/workqueue.h>
59523 #include <linux/poll.h>
59524 #include <asm/pgalloc.h>
59525+#include <asm/local.h>
59526 #include "drm.h"
59527
59528 #include <linux/idr.h>
59529@@ -1074,7 +1075,7 @@ struct drm_device {
59530
59531 /** \name Usage Counters */
59532 /*@{ */
59533- int open_count; /**< Outstanding files open */
59534+ local_t open_count; /**< Outstanding files open */
59535 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
59536 atomic_t vma_count; /**< Outstanding vma areas open */
59537 int buf_use; /**< Buffers in use -- cannot alloc */
59538@@ -1085,7 +1086,7 @@ struct drm_device {
59539 /*@{ */
59540 unsigned long counters;
59541 enum drm_stat_type types[15];
59542- atomic_t counts[15];
59543+ atomic_unchecked_t counts[15];
59544 /*@} */
59545
59546 struct list_head filelist;
59547diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
59548index 37515d1..34fa8b0 100644
59549--- a/include/drm/drm_crtc_helper.h
59550+++ b/include/drm/drm_crtc_helper.h
59551@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
59552
59553 /* disable crtc when not in use - more explicit than dpms off */
59554 void (*disable)(struct drm_crtc *crtc);
59555-};
59556+} __no_const;
59557
59558 struct drm_encoder_helper_funcs {
59559 void (*dpms)(struct drm_encoder *encoder, int mode);
59560@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
59561 struct drm_connector *connector);
59562 /* disable encoder when not in use - more explicit than dpms off */
59563 void (*disable)(struct drm_encoder *encoder);
59564-};
59565+} __no_const;
59566
59567 struct drm_connector_helper_funcs {
59568 int (*get_modes)(struct drm_connector *connector);
59569diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
59570index d6d1da4..fdd1ac5 100644
59571--- a/include/drm/ttm/ttm_memory.h
59572+++ b/include/drm/ttm/ttm_memory.h
59573@@ -48,7 +48,7 @@
59574
59575 struct ttm_mem_shrink {
59576 int (*do_shrink) (struct ttm_mem_shrink *);
59577-};
59578+} __no_const;
59579
59580 /**
59581 * struct ttm_mem_global - Global memory accounting structure.
59582diff --git a/include/linux/a.out.h b/include/linux/a.out.h
59583index e86dfca..40cc55f 100644
59584--- a/include/linux/a.out.h
59585+++ b/include/linux/a.out.h
59586@@ -39,6 +39,14 @@ enum machine_type {
59587 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
59588 };
59589
59590+/* Constants for the N_FLAGS field */
59591+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
59592+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
59593+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
59594+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
59595+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
59596+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
59597+
59598 #if !defined (N_MAGIC)
59599 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
59600 #endif
59601diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
59602index 06fd4bb..1caec0d 100644
59603--- a/include/linux/atmdev.h
59604+++ b/include/linux/atmdev.h
59605@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
59606 #endif
59607
59608 struct k_atm_aal_stats {
59609-#define __HANDLE_ITEM(i) atomic_t i
59610+#define __HANDLE_ITEM(i) atomic_unchecked_t i
59611 __AAL_STAT_ITEMS
59612 #undef __HANDLE_ITEM
59613 };
59614diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
59615index 366422b..1fa7f84 100644
59616--- a/include/linux/binfmts.h
59617+++ b/include/linux/binfmts.h
59618@@ -89,6 +89,7 @@ struct linux_binfmt {
59619 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
59620 int (*load_shlib)(struct file *);
59621 int (*core_dump)(struct coredump_params *cprm);
59622+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
59623 unsigned long min_coredump; /* minimal dump size */
59624 };
59625
59626diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
59627index 4d4ac24..2c3ccce 100644
59628--- a/include/linux/blkdev.h
59629+++ b/include/linux/blkdev.h
59630@@ -1376,7 +1376,7 @@ struct block_device_operations {
59631 /* this callback is with swap_lock and sometimes page table lock held */
59632 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
59633 struct module *owner;
59634-};
59635+} __do_const;
59636
59637 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
59638 unsigned long);
59639diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
59640index 4d1a074..88f929a 100644
59641--- a/include/linux/blktrace_api.h
59642+++ b/include/linux/blktrace_api.h
59643@@ -162,7 +162,7 @@ struct blk_trace {
59644 struct dentry *dir;
59645 struct dentry *dropped_file;
59646 struct dentry *msg_file;
59647- atomic_t dropped;
59648+ atomic_unchecked_t dropped;
59649 };
59650
59651 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
59652diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
59653index 83195fb..0b0f77d 100644
59654--- a/include/linux/byteorder/little_endian.h
59655+++ b/include/linux/byteorder/little_endian.h
59656@@ -42,51 +42,51 @@
59657
59658 static inline __le64 __cpu_to_le64p(const __u64 *p)
59659 {
59660- return (__force __le64)*p;
59661+ return (__force const __le64)*p;
59662 }
59663 static inline __u64 __le64_to_cpup(const __le64 *p)
59664 {
59665- return (__force __u64)*p;
59666+ return (__force const __u64)*p;
59667 }
59668 static inline __le32 __cpu_to_le32p(const __u32 *p)
59669 {
59670- return (__force __le32)*p;
59671+ return (__force const __le32)*p;
59672 }
59673 static inline __u32 __le32_to_cpup(const __le32 *p)
59674 {
59675- return (__force __u32)*p;
59676+ return (__force const __u32)*p;
59677 }
59678 static inline __le16 __cpu_to_le16p(const __u16 *p)
59679 {
59680- return (__force __le16)*p;
59681+ return (__force const __le16)*p;
59682 }
59683 static inline __u16 __le16_to_cpup(const __le16 *p)
59684 {
59685- return (__force __u16)*p;
59686+ return (__force const __u16)*p;
59687 }
59688 static inline __be64 __cpu_to_be64p(const __u64 *p)
59689 {
59690- return (__force __be64)__swab64p(p);
59691+ return (__force const __be64)__swab64p(p);
59692 }
59693 static inline __u64 __be64_to_cpup(const __be64 *p)
59694 {
59695- return __swab64p((__u64 *)p);
59696+ return __swab64p((const __u64 *)p);
59697 }
59698 static inline __be32 __cpu_to_be32p(const __u32 *p)
59699 {
59700- return (__force __be32)__swab32p(p);
59701+ return (__force const __be32)__swab32p(p);
59702 }
59703 static inline __u32 __be32_to_cpup(const __be32 *p)
59704 {
59705- return __swab32p((__u32 *)p);
59706+ return __swab32p((const __u32 *)p);
59707 }
59708 static inline __be16 __cpu_to_be16p(const __u16 *p)
59709 {
59710- return (__force __be16)__swab16p(p);
59711+ return (__force const __be16)__swab16p(p);
59712 }
59713 static inline __u16 __be16_to_cpup(const __be16 *p)
59714 {
59715- return __swab16p((__u16 *)p);
59716+ return __swab16p((const __u16 *)p);
59717 }
59718 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
59719 #define __le64_to_cpus(x) do { (void)(x); } while (0)
59720diff --git a/include/linux/cache.h b/include/linux/cache.h
59721index 4c57065..4307975 100644
59722--- a/include/linux/cache.h
59723+++ b/include/linux/cache.h
59724@@ -16,6 +16,10 @@
59725 #define __read_mostly
59726 #endif
59727
59728+#ifndef __read_only
59729+#define __read_only __read_mostly
59730+#endif
59731+
59732 #ifndef ____cacheline_aligned
59733 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
59734 #endif
59735diff --git a/include/linux/capability.h b/include/linux/capability.h
59736index 12d52de..b5f7fa7 100644
59737--- a/include/linux/capability.h
59738+++ b/include/linux/capability.h
59739@@ -548,6 +548,8 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
59740 extern bool capable(int cap);
59741 extern bool ns_capable(struct user_namespace *ns, int cap);
59742 extern bool nsown_capable(int cap);
59743+extern bool capable_nolog(int cap);
59744+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
59745
59746 /* audit system wants to get cap info from files as well */
59747 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
59748diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
59749index 42e55de..1cd0e66 100644
59750--- a/include/linux/cleancache.h
59751+++ b/include/linux/cleancache.h
59752@@ -31,7 +31,7 @@ struct cleancache_ops {
59753 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
59754 void (*invalidate_inode)(int, struct cleancache_filekey);
59755 void (*invalidate_fs)(int);
59756-};
59757+} __no_const;
59758
59759 extern struct cleancache_ops
59760 cleancache_register_ops(struct cleancache_ops *ops);
59761diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
59762index 2f40791..a62d196 100644
59763--- a/include/linux/compiler-gcc4.h
59764+++ b/include/linux/compiler-gcc4.h
59765@@ -32,6 +32,16 @@
59766 #define __linktime_error(message) __attribute__((__error__(message)))
59767
59768 #if __GNUC_MINOR__ >= 5
59769+
59770+#ifdef CONSTIFY_PLUGIN
59771+#define __no_const __attribute__((no_const))
59772+#define __do_const __attribute__((do_const))
59773+#endif
59774+
59775+#ifdef SIZE_OVERFLOW_PLUGIN
59776+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
59777+#endif
59778+
59779 /*
59780 * Mark a position in code as unreachable. This can be used to
59781 * suppress control flow warnings after asm blocks that transfer
59782@@ -47,6 +57,11 @@
59783 #define __noclone __attribute__((__noclone__))
59784
59785 #endif
59786+
59787+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
59788+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
59789+#define __bos0(ptr) __bos((ptr), 0)
59790+#define __bos1(ptr) __bos((ptr), 1)
59791 #endif
59792
59793 #if __GNUC_MINOR__ > 0
59794diff --git a/include/linux/compiler.h b/include/linux/compiler.h
59795index 923d093..726c17f 100644
59796--- a/include/linux/compiler.h
59797+++ b/include/linux/compiler.h
59798@@ -5,31 +5,62 @@
59799
59800 #ifdef __CHECKER__
59801 # define __user __attribute__((noderef, address_space(1)))
59802+# define __force_user __force __user
59803 # define __kernel __attribute__((address_space(0)))
59804+# define __force_kernel __force __kernel
59805 # define __safe __attribute__((safe))
59806 # define __force __attribute__((force))
59807 # define __nocast __attribute__((nocast))
59808 # define __iomem __attribute__((noderef, address_space(2)))
59809+# define __force_iomem __force __iomem
59810 # define __acquires(x) __attribute__((context(x,0,1)))
59811 # define __releases(x) __attribute__((context(x,1,0)))
59812 # define __acquire(x) __context__(x,1)
59813 # define __release(x) __context__(x,-1)
59814 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
59815 # define __percpu __attribute__((noderef, address_space(3)))
59816+# define __force_percpu __force __percpu
59817 #ifdef CONFIG_SPARSE_RCU_POINTER
59818 # define __rcu __attribute__((noderef, address_space(4)))
59819+# define __force_rcu __force __rcu
59820 #else
59821 # define __rcu
59822+# define __force_rcu
59823 #endif
59824 extern void __chk_user_ptr(const volatile void __user *);
59825 extern void __chk_io_ptr(const volatile void __iomem *);
59826+#elif defined(CHECKER_PLUGIN)
59827+//# define __user
59828+//# define __force_user
59829+//# define __kernel
59830+//# define __force_kernel
59831+# define __safe
59832+# define __force
59833+# define __nocast
59834+# define __iomem
59835+# define __force_iomem
59836+# define __chk_user_ptr(x) (void)0
59837+# define __chk_io_ptr(x) (void)0
59838+# define __builtin_warning(x, y...) (1)
59839+# define __acquires(x)
59840+# define __releases(x)
59841+# define __acquire(x) (void)0
59842+# define __release(x) (void)0
59843+# define __cond_lock(x,c) (c)
59844+# define __percpu
59845+# define __force_percpu
59846+# define __rcu
59847+# define __force_rcu
59848 #else
59849 # define __user
59850+# define __force_user
59851 # define __kernel
59852+# define __force_kernel
59853 # define __safe
59854 # define __force
59855 # define __nocast
59856 # define __iomem
59857+# define __force_iomem
59858 # define __chk_user_ptr(x) (void)0
59859 # define __chk_io_ptr(x) (void)0
59860 # define __builtin_warning(x, y...) (1)
59861@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
59862 # define __release(x) (void)0
59863 # define __cond_lock(x,c) (c)
59864 # define __percpu
59865+# define __force_percpu
59866 # define __rcu
59867+# define __force_rcu
59868 #endif
59869
59870 #ifdef __KERNEL__
59871@@ -264,6 +297,18 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59872 # define __attribute_const__ /* unimplemented */
59873 #endif
59874
59875+#ifndef __no_const
59876+# define __no_const
59877+#endif
59878+
59879+#ifndef __do_const
59880+# define __do_const
59881+#endif
59882+
59883+#ifndef __size_overflow
59884+# define __size_overflow(...)
59885+#endif
59886+
59887 /*
59888 * Tell gcc if a function is cold. The compiler will assume any path
59889 * directly leading to the call is unlikely.
59890@@ -273,6 +318,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59891 #define __cold
59892 #endif
59893
59894+#ifndef __alloc_size
59895+#define __alloc_size(...)
59896+#endif
59897+
59898+#ifndef __bos
59899+#define __bos(ptr, arg)
59900+#endif
59901+
59902+#ifndef __bos0
59903+#define __bos0(ptr)
59904+#endif
59905+
59906+#ifndef __bos1
59907+#define __bos1(ptr)
59908+#endif
59909+
59910 /* Simple shorthand for a section definition */
59911 #ifndef __section
59912 # define __section(S) __attribute__ ((__section__(#S)))
59913@@ -308,6 +369,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59914 * use is to mediate communication between process-level code and irq/NMI
59915 * handlers, all running on the same CPU.
59916 */
59917-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
59918+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
59919+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
59920
59921 #endif /* __LINUX_COMPILER_H */
59922diff --git a/include/linux/cred.h b/include/linux/cred.h
59923index adadf71..6af5560 100644
59924--- a/include/linux/cred.h
59925+++ b/include/linux/cred.h
59926@@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
59927 static inline void validate_process_creds(void)
59928 {
59929 }
59930+static inline void validate_task_creds(struct task_struct *task)
59931+{
59932+}
59933 #endif
59934
59935 /**
59936diff --git a/include/linux/crypto.h b/include/linux/crypto.h
59937index b92eadf..b4ecdc1 100644
59938--- a/include/linux/crypto.h
59939+++ b/include/linux/crypto.h
59940@@ -373,7 +373,7 @@ struct cipher_tfm {
59941 const u8 *key, unsigned int keylen);
59942 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
59943 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
59944-};
59945+} __no_const;
59946
59947 struct hash_tfm {
59948 int (*init)(struct hash_desc *desc);
59949@@ -394,13 +394,13 @@ struct compress_tfm {
59950 int (*cot_decompress)(struct crypto_tfm *tfm,
59951 const u8 *src, unsigned int slen,
59952 u8 *dst, unsigned int *dlen);
59953-};
59954+} __no_const;
59955
59956 struct rng_tfm {
59957 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
59958 unsigned int dlen);
59959 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
59960-};
59961+} __no_const;
59962
59963 #define crt_ablkcipher crt_u.ablkcipher
59964 #define crt_aead crt_u.aead
59965diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
59966index 7925bf0..d5143d2 100644
59967--- a/include/linux/decompress/mm.h
59968+++ b/include/linux/decompress/mm.h
59969@@ -77,7 +77,7 @@ static void free(void *where)
59970 * warnings when not needed (indeed large_malloc / large_free are not
59971 * needed by inflate */
59972
59973-#define malloc(a) kmalloc(a, GFP_KERNEL)
59974+#define malloc(a) kmalloc((a), GFP_KERNEL)
59975 #define free(a) kfree(a)
59976
59977 #define large_malloc(a) vmalloc(a)
59978diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
59979index dfc099e..e583e66 100644
59980--- a/include/linux/dma-mapping.h
59981+++ b/include/linux/dma-mapping.h
59982@@ -51,7 +51,7 @@ struct dma_map_ops {
59983 u64 (*get_required_mask)(struct device *dev);
59984 #endif
59985 int is_phys;
59986-};
59987+} __do_const;
59988
59989 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
59990
59991diff --git a/include/linux/efi.h b/include/linux/efi.h
59992index ec45ccd..9923c32 100644
59993--- a/include/linux/efi.h
59994+++ b/include/linux/efi.h
59995@@ -635,7 +635,7 @@ struct efivar_operations {
59996 efi_get_variable_t *get_variable;
59997 efi_get_next_variable_t *get_next_variable;
59998 efi_set_variable_t *set_variable;
59999-};
60000+} __no_const;
60001
60002 struct efivars {
60003 /*
60004diff --git a/include/linux/elf.h b/include/linux/elf.h
60005index 999b4f5..57753b4 100644
60006--- a/include/linux/elf.h
60007+++ b/include/linux/elf.h
60008@@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
60009 #define PT_GNU_EH_FRAME 0x6474e550
60010
60011 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
60012+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
60013+
60014+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
60015+
60016+/* Constants for the e_flags field */
60017+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60018+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
60019+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
60020+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
60021+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60022+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60023
60024 /*
60025 * Extended Numbering
60026@@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
60027 #define DT_DEBUG 21
60028 #define DT_TEXTREL 22
60029 #define DT_JMPREL 23
60030+#define DT_FLAGS 30
60031+ #define DF_TEXTREL 0x00000004
60032 #define DT_ENCODING 32
60033 #define OLD_DT_LOOS 0x60000000
60034 #define DT_LOOS 0x6000000d
60035@@ -243,6 +256,19 @@ typedef struct elf64_hdr {
60036 #define PF_W 0x2
60037 #define PF_X 0x1
60038
60039+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
60040+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
60041+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
60042+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
60043+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
60044+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
60045+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
60046+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
60047+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
60048+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
60049+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
60050+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
60051+
60052 typedef struct elf32_phdr{
60053 Elf32_Word p_type;
60054 Elf32_Off p_offset;
60055@@ -335,6 +361,8 @@ typedef struct elf64_shdr {
60056 #define EI_OSABI 7
60057 #define EI_PAD 8
60058
60059+#define EI_PAX 14
60060+
60061 #define ELFMAG0 0x7f /* EI_MAG */
60062 #define ELFMAG1 'E'
60063 #define ELFMAG2 'L'
60064@@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
60065 #define elf_note elf32_note
60066 #define elf_addr_t Elf32_Off
60067 #define Elf_Half Elf32_Half
60068+#define elf_dyn Elf32_Dyn
60069
60070 #else
60071
60072@@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
60073 #define elf_note elf64_note
60074 #define elf_addr_t Elf64_Off
60075 #define Elf_Half Elf64_Half
60076+#define elf_dyn Elf64_Dyn
60077
60078 #endif
60079
60080diff --git a/include/linux/filter.h b/include/linux/filter.h
60081index 8eeb205..d59bfa2 100644
60082--- a/include/linux/filter.h
60083+++ b/include/linux/filter.h
60084@@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
60085
60086 struct sk_buff;
60087 struct sock;
60088+struct bpf_jit_work;
60089
60090 struct sk_filter
60091 {
60092@@ -141,6 +142,9 @@ struct sk_filter
60093 unsigned int len; /* Number of filter blocks */
60094 unsigned int (*bpf_func)(const struct sk_buff *skb,
60095 const struct sock_filter *filter);
60096+#ifdef CONFIG_BPF_JIT
60097+ struct bpf_jit_work *work;
60098+#endif
60099 struct rcu_head rcu;
60100 struct sock_filter insns[0];
60101 };
60102diff --git a/include/linux/firewire.h b/include/linux/firewire.h
60103index cdc9b71..ce69fb5 100644
60104--- a/include/linux/firewire.h
60105+++ b/include/linux/firewire.h
60106@@ -413,7 +413,7 @@ struct fw_iso_context {
60107 union {
60108 fw_iso_callback_t sc;
60109 fw_iso_mc_callback_t mc;
60110- } callback;
60111+ } __no_const callback;
60112 void *callback_data;
60113 };
60114
60115diff --git a/include/linux/fs.h b/include/linux/fs.h
60116index 25c40b9..1bfd4f4 100644
60117--- a/include/linux/fs.h
60118+++ b/include/linux/fs.h
60119@@ -1634,7 +1634,8 @@ struct file_operations {
60120 int (*setlease)(struct file *, long, struct file_lock **);
60121 long (*fallocate)(struct file *file, int mode, loff_t offset,
60122 loff_t len);
60123-};
60124+} __do_const;
60125+typedef struct file_operations __no_const file_operations_no_const;
60126
60127 struct inode_operations {
60128 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
60129diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
60130index 003dc0f..3c4ea97 100644
60131--- a/include/linux/fs_struct.h
60132+++ b/include/linux/fs_struct.h
60133@@ -6,7 +6,7 @@
60134 #include <linux/seqlock.h>
60135
60136 struct fs_struct {
60137- int users;
60138+ atomic_t users;
60139 spinlock_t lock;
60140 seqcount_t seq;
60141 int umask;
60142diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
60143index ce31408..b1ad003 100644
60144--- a/include/linux/fscache-cache.h
60145+++ b/include/linux/fscache-cache.h
60146@@ -102,7 +102,7 @@ struct fscache_operation {
60147 fscache_operation_release_t release;
60148 };
60149
60150-extern atomic_t fscache_op_debug_id;
60151+extern atomic_unchecked_t fscache_op_debug_id;
60152 extern void fscache_op_work_func(struct work_struct *work);
60153
60154 extern void fscache_enqueue_operation(struct fscache_operation *);
60155@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
60156 {
60157 INIT_WORK(&op->work, fscache_op_work_func);
60158 atomic_set(&op->usage, 1);
60159- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
60160+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
60161 op->processor = processor;
60162 op->release = release;
60163 INIT_LIST_HEAD(&op->pend_link);
60164diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
60165index a6dfe69..569586df 100644
60166--- a/include/linux/fsnotify.h
60167+++ b/include/linux/fsnotify.h
60168@@ -315,7 +315,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
60169 */
60170 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
60171 {
60172- return kstrdup(name, GFP_KERNEL);
60173+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
60174 }
60175
60176 /*
60177diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
60178index 91d0e0a3..035666b 100644
60179--- a/include/linux/fsnotify_backend.h
60180+++ b/include/linux/fsnotify_backend.h
60181@@ -105,6 +105,7 @@ struct fsnotify_ops {
60182 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
60183 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
60184 };
60185+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
60186
60187 /*
60188 * A group is a "thing" that wants to receive notification about filesystem
60189diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
60190index 176a939..1462211 100644
60191--- a/include/linux/ftrace_event.h
60192+++ b/include/linux/ftrace_event.h
60193@@ -97,7 +97,7 @@ struct trace_event_functions {
60194 trace_print_func raw;
60195 trace_print_func hex;
60196 trace_print_func binary;
60197-};
60198+} __no_const;
60199
60200 struct trace_event {
60201 struct hlist_node node;
60202@@ -263,7 +263,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
60203 extern int trace_add_event_call(struct ftrace_event_call *call);
60204 extern void trace_remove_event_call(struct ftrace_event_call *call);
60205
60206-#define is_signed_type(type) (((type)(-1)) < 0)
60207+#define is_signed_type(type) (((type)(-1)) < (type)1)
60208
60209 int trace_set_clr_event(const char *system, const char *event, int set);
60210
60211diff --git a/include/linux/genhd.h b/include/linux/genhd.h
60212index 017a7fb..33a8507 100644
60213--- a/include/linux/genhd.h
60214+++ b/include/linux/genhd.h
60215@@ -185,7 +185,7 @@ struct gendisk {
60216 struct kobject *slave_dir;
60217
60218 struct timer_rand_state *random;
60219- atomic_t sync_io; /* RAID */
60220+ atomic_unchecked_t sync_io; /* RAID */
60221 struct disk_events *ev;
60222 #ifdef CONFIG_BLK_DEV_INTEGRITY
60223 struct blk_integrity *integrity;
60224diff --git a/include/linux/gracl.h b/include/linux/gracl.h
60225new file mode 100644
60226index 0000000..c938b1f
60227--- /dev/null
60228+++ b/include/linux/gracl.h
60229@@ -0,0 +1,319 @@
60230+#ifndef GR_ACL_H
60231+#define GR_ACL_H
60232+
60233+#include <linux/grdefs.h>
60234+#include <linux/resource.h>
60235+#include <linux/capability.h>
60236+#include <linux/dcache.h>
60237+#include <asm/resource.h>
60238+
60239+/* Major status information */
60240+
60241+#define GR_VERSION "grsecurity 2.9.1"
60242+#define GRSECURITY_VERSION 0x2901
60243+
60244+enum {
60245+ GR_SHUTDOWN = 0,
60246+ GR_ENABLE = 1,
60247+ GR_SPROLE = 2,
60248+ GR_RELOAD = 3,
60249+ GR_SEGVMOD = 4,
60250+ GR_STATUS = 5,
60251+ GR_UNSPROLE = 6,
60252+ GR_PASSSET = 7,
60253+ GR_SPROLEPAM = 8,
60254+};
60255+
60256+/* Password setup definitions
60257+ * kernel/grhash.c */
60258+enum {
60259+ GR_PW_LEN = 128,
60260+ GR_SALT_LEN = 16,
60261+ GR_SHA_LEN = 32,
60262+};
60263+
60264+enum {
60265+ GR_SPROLE_LEN = 64,
60266+};
60267+
60268+enum {
60269+ GR_NO_GLOB = 0,
60270+ GR_REG_GLOB,
60271+ GR_CREATE_GLOB
60272+};
60273+
60274+#define GR_NLIMITS 32
60275+
60276+/* Begin Data Structures */
60277+
60278+struct sprole_pw {
60279+ unsigned char *rolename;
60280+ unsigned char salt[GR_SALT_LEN];
60281+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
60282+};
60283+
60284+struct name_entry {
60285+ __u32 key;
60286+ ino_t inode;
60287+ dev_t device;
60288+ char *name;
60289+ __u16 len;
60290+ __u8 deleted;
60291+ struct name_entry *prev;
60292+ struct name_entry *next;
60293+};
60294+
60295+struct inodev_entry {
60296+ struct name_entry *nentry;
60297+ struct inodev_entry *prev;
60298+ struct inodev_entry *next;
60299+};
60300+
60301+struct acl_role_db {
60302+ struct acl_role_label **r_hash;
60303+ __u32 r_size;
60304+};
60305+
60306+struct inodev_db {
60307+ struct inodev_entry **i_hash;
60308+ __u32 i_size;
60309+};
60310+
60311+struct name_db {
60312+ struct name_entry **n_hash;
60313+ __u32 n_size;
60314+};
60315+
60316+struct crash_uid {
60317+ uid_t uid;
60318+ unsigned long expires;
60319+};
60320+
60321+struct gr_hash_struct {
60322+ void **table;
60323+ void **nametable;
60324+ void *first;
60325+ __u32 table_size;
60326+ __u32 used_size;
60327+ int type;
60328+};
60329+
60330+/* Userspace Grsecurity ACL data structures */
60331+
60332+struct acl_subject_label {
60333+ char *filename;
60334+ ino_t inode;
60335+ dev_t device;
60336+ __u32 mode;
60337+ kernel_cap_t cap_mask;
60338+ kernel_cap_t cap_lower;
60339+ kernel_cap_t cap_invert_audit;
60340+
60341+ struct rlimit res[GR_NLIMITS];
60342+ __u32 resmask;
60343+
60344+ __u8 user_trans_type;
60345+ __u8 group_trans_type;
60346+ uid_t *user_transitions;
60347+ gid_t *group_transitions;
60348+ __u16 user_trans_num;
60349+ __u16 group_trans_num;
60350+
60351+ __u32 sock_families[2];
60352+ __u32 ip_proto[8];
60353+ __u32 ip_type;
60354+ struct acl_ip_label **ips;
60355+ __u32 ip_num;
60356+ __u32 inaddr_any_override;
60357+
60358+ __u32 crashes;
60359+ unsigned long expires;
60360+
60361+ struct acl_subject_label *parent_subject;
60362+ struct gr_hash_struct *hash;
60363+ struct acl_subject_label *prev;
60364+ struct acl_subject_label *next;
60365+
60366+ struct acl_object_label **obj_hash;
60367+ __u32 obj_hash_size;
60368+ __u16 pax_flags;
60369+};
60370+
60371+struct role_allowed_ip {
60372+ __u32 addr;
60373+ __u32 netmask;
60374+
60375+ struct role_allowed_ip *prev;
60376+ struct role_allowed_ip *next;
60377+};
60378+
60379+struct role_transition {
60380+ char *rolename;
60381+
60382+ struct role_transition *prev;
60383+ struct role_transition *next;
60384+};
60385+
60386+struct acl_role_label {
60387+ char *rolename;
60388+ uid_t uidgid;
60389+ __u16 roletype;
60390+
60391+ __u16 auth_attempts;
60392+ unsigned long expires;
60393+
60394+ struct acl_subject_label *root_label;
60395+ struct gr_hash_struct *hash;
60396+
60397+ struct acl_role_label *prev;
60398+ struct acl_role_label *next;
60399+
60400+ struct role_transition *transitions;
60401+ struct role_allowed_ip *allowed_ips;
60402+ uid_t *domain_children;
60403+ __u16 domain_child_num;
60404+
60405+ umode_t umask;
60406+
60407+ struct acl_subject_label **subj_hash;
60408+ __u32 subj_hash_size;
60409+};
60410+
60411+struct user_acl_role_db {
60412+ struct acl_role_label **r_table;
60413+ __u32 num_pointers; /* Number of allocations to track */
60414+ __u32 num_roles; /* Number of roles */
60415+ __u32 num_domain_children; /* Number of domain children */
60416+ __u32 num_subjects; /* Number of subjects */
60417+ __u32 num_objects; /* Number of objects */
60418+};
60419+
60420+struct acl_object_label {
60421+ char *filename;
60422+ ino_t inode;
60423+ dev_t device;
60424+ __u32 mode;
60425+
60426+ struct acl_subject_label *nested;
60427+ struct acl_object_label *globbed;
60428+
60429+ /* next two structures not used */
60430+
60431+ struct acl_object_label *prev;
60432+ struct acl_object_label *next;
60433+};
60434+
60435+struct acl_ip_label {
60436+ char *iface;
60437+ __u32 addr;
60438+ __u32 netmask;
60439+ __u16 low, high;
60440+ __u8 mode;
60441+ __u32 type;
60442+ __u32 proto[8];
60443+
60444+ /* next two structures not used */
60445+
60446+ struct acl_ip_label *prev;
60447+ struct acl_ip_label *next;
60448+};
60449+
60450+struct gr_arg {
60451+ struct user_acl_role_db role_db;
60452+ unsigned char pw[GR_PW_LEN];
60453+ unsigned char salt[GR_SALT_LEN];
60454+ unsigned char sum[GR_SHA_LEN];
60455+ unsigned char sp_role[GR_SPROLE_LEN];
60456+ struct sprole_pw *sprole_pws;
60457+ dev_t segv_device;
60458+ ino_t segv_inode;
60459+ uid_t segv_uid;
60460+ __u16 num_sprole_pws;
60461+ __u16 mode;
60462+};
60463+
60464+struct gr_arg_wrapper {
60465+ struct gr_arg *arg;
60466+ __u32 version;
60467+ __u32 size;
60468+};
60469+
60470+struct subject_map {
60471+ struct acl_subject_label *user;
60472+ struct acl_subject_label *kernel;
60473+ struct subject_map *prev;
60474+ struct subject_map *next;
60475+};
60476+
60477+struct acl_subj_map_db {
60478+ struct subject_map **s_hash;
60479+ __u32 s_size;
60480+};
60481+
60482+/* End Data Structures Section */
60483+
60484+/* Hash functions generated by empirical testing by Brad Spengler
60485+ Makes good use of the low bits of the inode. Generally 0-1 times
60486+ in loop for successful match. 0-3 for unsuccessful match.
60487+ Shift/add algorithm with modulus of table size and an XOR*/
60488+
60489+static __inline__ unsigned int
60490+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
60491+{
60492+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
60493+}
60494+
60495+ static __inline__ unsigned int
60496+shash(const struct acl_subject_label *userp, const unsigned int sz)
60497+{
60498+ return ((const unsigned long)userp % sz);
60499+}
60500+
60501+static __inline__ unsigned int
60502+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
60503+{
60504+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
60505+}
60506+
60507+static __inline__ unsigned int
60508+nhash(const char *name, const __u16 len, const unsigned int sz)
60509+{
60510+ return full_name_hash((const unsigned char *)name, len) % sz;
60511+}
60512+
60513+#define FOR_EACH_ROLE_START(role) \
60514+ role = role_list; \
60515+ while (role) {
60516+
60517+#define FOR_EACH_ROLE_END(role) \
60518+ role = role->prev; \
60519+ }
60520+
60521+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
60522+ subj = NULL; \
60523+ iter = 0; \
60524+ while (iter < role->subj_hash_size) { \
60525+ if (subj == NULL) \
60526+ subj = role->subj_hash[iter]; \
60527+ if (subj == NULL) { \
60528+ iter++; \
60529+ continue; \
60530+ }
60531+
60532+#define FOR_EACH_SUBJECT_END(subj,iter) \
60533+ subj = subj->next; \
60534+ if (subj == NULL) \
60535+ iter++; \
60536+ }
60537+
60538+
60539+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
60540+ subj = role->hash->first; \
60541+ while (subj != NULL) {
60542+
60543+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
60544+ subj = subj->next; \
60545+ }
60546+
60547+#endif
60548+
60549diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
60550new file mode 100644
60551index 0000000..323ecf2
60552--- /dev/null
60553+++ b/include/linux/gralloc.h
60554@@ -0,0 +1,9 @@
60555+#ifndef __GRALLOC_H
60556+#define __GRALLOC_H
60557+
60558+void acl_free_all(void);
60559+int acl_alloc_stack_init(unsigned long size);
60560+void *acl_alloc(unsigned long len);
60561+void *acl_alloc_num(unsigned long num, unsigned long len);
60562+
60563+#endif
60564diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
60565new file mode 100644
60566index 0000000..b30e9bc
60567--- /dev/null
60568+++ b/include/linux/grdefs.h
60569@@ -0,0 +1,140 @@
60570+#ifndef GRDEFS_H
60571+#define GRDEFS_H
60572+
60573+/* Begin grsecurity status declarations */
60574+
60575+enum {
60576+ GR_READY = 0x01,
60577+ GR_STATUS_INIT = 0x00 // disabled state
60578+};
60579+
60580+/* Begin ACL declarations */
60581+
60582+/* Role flags */
60583+
60584+enum {
60585+ GR_ROLE_USER = 0x0001,
60586+ GR_ROLE_GROUP = 0x0002,
60587+ GR_ROLE_DEFAULT = 0x0004,
60588+ GR_ROLE_SPECIAL = 0x0008,
60589+ GR_ROLE_AUTH = 0x0010,
60590+ GR_ROLE_NOPW = 0x0020,
60591+ GR_ROLE_GOD = 0x0040,
60592+ GR_ROLE_LEARN = 0x0080,
60593+ GR_ROLE_TPE = 0x0100,
60594+ GR_ROLE_DOMAIN = 0x0200,
60595+ GR_ROLE_PAM = 0x0400,
60596+ GR_ROLE_PERSIST = 0x0800
60597+};
60598+
60599+/* ACL Subject and Object mode flags */
60600+enum {
60601+ GR_DELETED = 0x80000000
60602+};
60603+
60604+/* ACL Object-only mode flags */
60605+enum {
60606+ GR_READ = 0x00000001,
60607+ GR_APPEND = 0x00000002,
60608+ GR_WRITE = 0x00000004,
60609+ GR_EXEC = 0x00000008,
60610+ GR_FIND = 0x00000010,
60611+ GR_INHERIT = 0x00000020,
60612+ GR_SETID = 0x00000040,
60613+ GR_CREATE = 0x00000080,
60614+ GR_DELETE = 0x00000100,
60615+ GR_LINK = 0x00000200,
60616+ GR_AUDIT_READ = 0x00000400,
60617+ GR_AUDIT_APPEND = 0x00000800,
60618+ GR_AUDIT_WRITE = 0x00001000,
60619+ GR_AUDIT_EXEC = 0x00002000,
60620+ GR_AUDIT_FIND = 0x00004000,
60621+ GR_AUDIT_INHERIT= 0x00008000,
60622+ GR_AUDIT_SETID = 0x00010000,
60623+ GR_AUDIT_CREATE = 0x00020000,
60624+ GR_AUDIT_DELETE = 0x00040000,
60625+ GR_AUDIT_LINK = 0x00080000,
60626+ GR_PTRACERD = 0x00100000,
60627+ GR_NOPTRACE = 0x00200000,
60628+ GR_SUPPRESS = 0x00400000,
60629+ GR_NOLEARN = 0x00800000,
60630+ GR_INIT_TRANSFER= 0x01000000
60631+};
60632+
60633+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
60634+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
60635+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
60636+
60637+/* ACL subject-only mode flags */
60638+enum {
60639+ GR_KILL = 0x00000001,
60640+ GR_VIEW = 0x00000002,
60641+ GR_PROTECTED = 0x00000004,
60642+ GR_LEARN = 0x00000008,
60643+ GR_OVERRIDE = 0x00000010,
60644+ /* just a placeholder, this mode is only used in userspace */
60645+ GR_DUMMY = 0x00000020,
60646+ GR_PROTSHM = 0x00000040,
60647+ GR_KILLPROC = 0x00000080,
60648+ GR_KILLIPPROC = 0x00000100,
60649+ /* just a placeholder, this mode is only used in userspace */
60650+ GR_NOTROJAN = 0x00000200,
60651+ GR_PROTPROCFD = 0x00000400,
60652+ GR_PROCACCT = 0x00000800,
60653+ GR_RELAXPTRACE = 0x00001000,
60654+ GR_NESTED = 0x00002000,
60655+ GR_INHERITLEARN = 0x00004000,
60656+ GR_PROCFIND = 0x00008000,
60657+ GR_POVERRIDE = 0x00010000,
60658+ GR_KERNELAUTH = 0x00020000,
60659+ GR_ATSECURE = 0x00040000,
60660+ GR_SHMEXEC = 0x00080000
60661+};
60662+
60663+enum {
60664+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
60665+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
60666+ GR_PAX_ENABLE_MPROTECT = 0x0004,
60667+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
60668+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
60669+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
60670+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
60671+ GR_PAX_DISABLE_MPROTECT = 0x0400,
60672+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
60673+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
60674+};
60675+
60676+enum {
60677+ GR_ID_USER = 0x01,
60678+ GR_ID_GROUP = 0x02,
60679+};
60680+
60681+enum {
60682+ GR_ID_ALLOW = 0x01,
60683+ GR_ID_DENY = 0x02,
60684+};
60685+
60686+#define GR_CRASH_RES 31
60687+#define GR_UIDTABLE_MAX 500
60688+
60689+/* begin resource learning section */
60690+enum {
60691+ GR_RLIM_CPU_BUMP = 60,
60692+ GR_RLIM_FSIZE_BUMP = 50000,
60693+ GR_RLIM_DATA_BUMP = 10000,
60694+ GR_RLIM_STACK_BUMP = 1000,
60695+ GR_RLIM_CORE_BUMP = 10000,
60696+ GR_RLIM_RSS_BUMP = 500000,
60697+ GR_RLIM_NPROC_BUMP = 1,
60698+ GR_RLIM_NOFILE_BUMP = 5,
60699+ GR_RLIM_MEMLOCK_BUMP = 50000,
60700+ GR_RLIM_AS_BUMP = 500000,
60701+ GR_RLIM_LOCKS_BUMP = 2,
60702+ GR_RLIM_SIGPENDING_BUMP = 5,
60703+ GR_RLIM_MSGQUEUE_BUMP = 10000,
60704+ GR_RLIM_NICE_BUMP = 1,
60705+ GR_RLIM_RTPRIO_BUMP = 1,
60706+ GR_RLIM_RTTIME_BUMP = 1000000
60707+};
60708+
60709+#endif
60710diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
60711new file mode 100644
60712index 0000000..da390f1
60713--- /dev/null
60714+++ b/include/linux/grinternal.h
60715@@ -0,0 +1,221 @@
60716+#ifndef __GRINTERNAL_H
60717+#define __GRINTERNAL_H
60718+
60719+#ifdef CONFIG_GRKERNSEC
60720+
60721+#include <linux/fs.h>
60722+#include <linux/mnt_namespace.h>
60723+#include <linux/nsproxy.h>
60724+#include <linux/gracl.h>
60725+#include <linux/grdefs.h>
60726+#include <linux/grmsg.h>
60727+
60728+void gr_add_learn_entry(const char *fmt, ...)
60729+ __attribute__ ((format (printf, 1, 2)));
60730+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
60731+ const struct vfsmount *mnt);
60732+__u32 gr_check_create(const struct dentry *new_dentry,
60733+ const struct dentry *parent,
60734+ const struct vfsmount *mnt, const __u32 mode);
60735+int gr_check_protected_task(const struct task_struct *task);
60736+__u32 to_gr_audit(const __u32 reqmode);
60737+int gr_set_acls(const int type);
60738+int gr_apply_subject_to_task(struct task_struct *task);
60739+int gr_acl_is_enabled(void);
60740+char gr_roletype_to_char(void);
60741+
60742+void gr_handle_alertkill(struct task_struct *task);
60743+char *gr_to_filename(const struct dentry *dentry,
60744+ const struct vfsmount *mnt);
60745+char *gr_to_filename1(const struct dentry *dentry,
60746+ const struct vfsmount *mnt);
60747+char *gr_to_filename2(const struct dentry *dentry,
60748+ const struct vfsmount *mnt);
60749+char *gr_to_filename3(const struct dentry *dentry,
60750+ const struct vfsmount *mnt);
60751+
60752+extern int grsec_enable_ptrace_readexec;
60753+extern int grsec_enable_harden_ptrace;
60754+extern int grsec_enable_link;
60755+extern int grsec_enable_fifo;
60756+extern int grsec_enable_execve;
60757+extern int grsec_enable_shm;
60758+extern int grsec_enable_execlog;
60759+extern int grsec_enable_signal;
60760+extern int grsec_enable_audit_ptrace;
60761+extern int grsec_enable_forkfail;
60762+extern int grsec_enable_time;
60763+extern int grsec_enable_rofs;
60764+extern int grsec_enable_chroot_shmat;
60765+extern int grsec_enable_chroot_mount;
60766+extern int grsec_enable_chroot_double;
60767+extern int grsec_enable_chroot_pivot;
60768+extern int grsec_enable_chroot_chdir;
60769+extern int grsec_enable_chroot_chmod;
60770+extern int grsec_enable_chroot_mknod;
60771+extern int grsec_enable_chroot_fchdir;
60772+extern int grsec_enable_chroot_nice;
60773+extern int grsec_enable_chroot_execlog;
60774+extern int grsec_enable_chroot_caps;
60775+extern int grsec_enable_chroot_sysctl;
60776+extern int grsec_enable_chroot_unix;
60777+extern int grsec_enable_tpe;
60778+extern int grsec_tpe_gid;
60779+extern int grsec_enable_tpe_all;
60780+extern int grsec_enable_tpe_invert;
60781+extern int grsec_enable_socket_all;
60782+extern int grsec_socket_all_gid;
60783+extern int grsec_enable_socket_client;
60784+extern int grsec_socket_client_gid;
60785+extern int grsec_enable_socket_server;
60786+extern int grsec_socket_server_gid;
60787+extern int grsec_audit_gid;
60788+extern int grsec_enable_group;
60789+extern int grsec_enable_audit_textrel;
60790+extern int grsec_enable_log_rwxmaps;
60791+extern int grsec_enable_mount;
60792+extern int grsec_enable_chdir;
60793+extern int grsec_resource_logging;
60794+extern int grsec_enable_blackhole;
60795+extern int grsec_lastack_retries;
60796+extern int grsec_enable_brute;
60797+extern int grsec_lock;
60798+
60799+extern spinlock_t grsec_alert_lock;
60800+extern unsigned long grsec_alert_wtime;
60801+extern unsigned long grsec_alert_fyet;
60802+
60803+extern spinlock_t grsec_audit_lock;
60804+
60805+extern rwlock_t grsec_exec_file_lock;
60806+
60807+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
60808+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
60809+ (tsk)->exec_file->f_vfsmnt) : "/")
60810+
60811+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
60812+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
60813+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
60814+
60815+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
60816+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
60817+ (tsk)->exec_file->f_vfsmnt) : "/")
60818+
60819+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
60820+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
60821+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
60822+
60823+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
60824+
60825+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
60826+
60827+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
60828+ (task)->pid, (cred)->uid, \
60829+ (cred)->euid, (cred)->gid, (cred)->egid, \
60830+ gr_parent_task_fullpath(task), \
60831+ (task)->real_parent->comm, (task)->real_parent->pid, \
60832+ (pcred)->uid, (pcred)->euid, \
60833+ (pcred)->gid, (pcred)->egid
60834+
60835+#define GR_CHROOT_CAPS {{ \
60836+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
60837+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
60838+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
60839+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
60840+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
60841+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
60842+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
60843+
60844+#define security_learn(normal_msg,args...) \
60845+({ \
60846+ read_lock(&grsec_exec_file_lock); \
60847+ gr_add_learn_entry(normal_msg "\n", ## args); \
60848+ read_unlock(&grsec_exec_file_lock); \
60849+})
60850+
60851+enum {
60852+ GR_DO_AUDIT,
60853+ GR_DONT_AUDIT,
60854+ /* used for non-audit messages that we shouldn't kill the task on */
60855+ GR_DONT_AUDIT_GOOD
60856+};
60857+
60858+enum {
60859+ GR_TTYSNIFF,
60860+ GR_RBAC,
60861+ GR_RBAC_STR,
60862+ GR_STR_RBAC,
60863+ GR_RBAC_MODE2,
60864+ GR_RBAC_MODE3,
60865+ GR_FILENAME,
60866+ GR_SYSCTL_HIDDEN,
60867+ GR_NOARGS,
60868+ GR_ONE_INT,
60869+ GR_ONE_INT_TWO_STR,
60870+ GR_ONE_STR,
60871+ GR_STR_INT,
60872+ GR_TWO_STR_INT,
60873+ GR_TWO_INT,
60874+ GR_TWO_U64,
60875+ GR_THREE_INT,
60876+ GR_FIVE_INT_TWO_STR,
60877+ GR_TWO_STR,
60878+ GR_THREE_STR,
60879+ GR_FOUR_STR,
60880+ GR_STR_FILENAME,
60881+ GR_FILENAME_STR,
60882+ GR_FILENAME_TWO_INT,
60883+ GR_FILENAME_TWO_INT_STR,
60884+ GR_TEXTREL,
60885+ GR_PTRACE,
60886+ GR_RESOURCE,
60887+ GR_CAP,
60888+ GR_SIG,
60889+ GR_SIG2,
60890+ GR_CRASH1,
60891+ GR_CRASH2,
60892+ GR_PSACCT,
60893+ GR_RWXMAP
60894+};
60895+
60896+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
60897+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
60898+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
60899+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
60900+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
60901+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
60902+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
60903+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
60904+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
60905+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
60906+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
60907+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
60908+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
60909+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
60910+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
60911+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
60912+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
60913+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
60914+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
60915+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
60916+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
60917+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
60918+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
60919+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
60920+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
60921+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
60922+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
60923+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
60924+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
60925+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
60926+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
60927+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
60928+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
60929+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
60930+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
60931+
60932+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
60933+
60934+#endif
60935+
60936+#endif
60937diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
60938new file mode 100644
60939index 0000000..ae576a1
60940--- /dev/null
60941+++ b/include/linux/grmsg.h
60942@@ -0,0 +1,109 @@
60943+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
60944+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
60945+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
60946+#define GR_STOPMOD_MSG "denied modification of module state by "
60947+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
60948+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
60949+#define GR_IOPERM_MSG "denied use of ioperm() by "
60950+#define GR_IOPL_MSG "denied use of iopl() by "
60951+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
60952+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
60953+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
60954+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
60955+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
60956+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
60957+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
60958+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
60959+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
60960+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
60961+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
60962+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
60963+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
60964+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
60965+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
60966+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
60967+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
60968+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
60969+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
60970+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
60971+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
60972+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
60973+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
60974+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
60975+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
60976+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
60977+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
60978+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
60979+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
60980+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
60981+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
60982+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
60983+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
60984+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
60985+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
60986+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
60987+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
60988+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
60989+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
60990+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
60991+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
60992+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
60993+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
60994+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
60995+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
60996+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
60997+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
60998+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
60999+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
61000+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
61001+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
61002+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
61003+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
61004+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
61005+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
61006+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
61007+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
61008+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
61009+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
61010+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
61011+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
61012+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
61013+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
61014+#define GR_FAILFORK_MSG "failed fork with errno %s by "
61015+#define GR_NICE_CHROOT_MSG "denied priority change by "
61016+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
61017+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
61018+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
61019+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
61020+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
61021+#define GR_TIME_MSG "time set by "
61022+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
61023+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
61024+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
61025+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
61026+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
61027+#define GR_BIND_MSG "denied bind() by "
61028+#define GR_CONNECT_MSG "denied connect() by "
61029+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
61030+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
61031+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
61032+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
61033+#define GR_CAP_ACL_MSG "use of %s denied for "
61034+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
61035+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
61036+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
61037+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
61038+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
61039+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
61040+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
61041+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
61042+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
61043+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
61044+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
61045+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
61046+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
61047+#define GR_VM86_MSG "denied use of vm86 by "
61048+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
61049+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
61050+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
61051+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
61052diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
61053new file mode 100644
61054index 0000000..acd05db
61055--- /dev/null
61056+++ b/include/linux/grsecurity.h
61057@@ -0,0 +1,232 @@
61058+#ifndef GR_SECURITY_H
61059+#define GR_SECURITY_H
61060+#include <linux/fs.h>
61061+#include <linux/fs_struct.h>
61062+#include <linux/binfmts.h>
61063+#include <linux/gracl.h>
61064+
61065+/* notify of brain-dead configs */
61066+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61067+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
61068+#endif
61069+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
61070+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
61071+#endif
61072+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
61073+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
61074+#endif
61075+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
61076+#error "CONFIG_PAX enabled, but no PaX options are enabled."
61077+#endif
61078+
61079+#include <linux/compat.h>
61080+
61081+struct user_arg_ptr {
61082+#ifdef CONFIG_COMPAT
61083+ bool is_compat;
61084+#endif
61085+ union {
61086+ const char __user *const __user *native;
61087+#ifdef CONFIG_COMPAT
61088+ compat_uptr_t __user *compat;
61089+#endif
61090+ } ptr;
61091+};
61092+
61093+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
61094+void gr_handle_brute_check(void);
61095+void gr_handle_kernel_exploit(void);
61096+int gr_process_user_ban(void);
61097+
61098+char gr_roletype_to_char(void);
61099+
61100+int gr_acl_enable_at_secure(void);
61101+
61102+int gr_check_user_change(int real, int effective, int fs);
61103+int gr_check_group_change(int real, int effective, int fs);
61104+
61105+void gr_del_task_from_ip_table(struct task_struct *p);
61106+
61107+int gr_pid_is_chrooted(struct task_struct *p);
61108+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
61109+int gr_handle_chroot_nice(void);
61110+int gr_handle_chroot_sysctl(const int op);
61111+int gr_handle_chroot_setpriority(struct task_struct *p,
61112+ const int niceval);
61113+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
61114+int gr_handle_chroot_chroot(const struct dentry *dentry,
61115+ const struct vfsmount *mnt);
61116+void gr_handle_chroot_chdir(struct path *path);
61117+int gr_handle_chroot_chmod(const struct dentry *dentry,
61118+ const struct vfsmount *mnt, const int mode);
61119+int gr_handle_chroot_mknod(const struct dentry *dentry,
61120+ const struct vfsmount *mnt, const int mode);
61121+int gr_handle_chroot_mount(const struct dentry *dentry,
61122+ const struct vfsmount *mnt,
61123+ const char *dev_name);
61124+int gr_handle_chroot_pivot(void);
61125+int gr_handle_chroot_unix(const pid_t pid);
61126+
61127+int gr_handle_rawio(const struct inode *inode);
61128+
61129+void gr_handle_ioperm(void);
61130+void gr_handle_iopl(void);
61131+
61132+umode_t gr_acl_umask(void);
61133+
61134+int gr_tpe_allow(const struct file *file);
61135+
61136+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
61137+void gr_clear_chroot_entries(struct task_struct *task);
61138+
61139+void gr_log_forkfail(const int retval);
61140+void gr_log_timechange(void);
61141+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
61142+void gr_log_chdir(const struct dentry *dentry,
61143+ const struct vfsmount *mnt);
61144+void gr_log_chroot_exec(const struct dentry *dentry,
61145+ const struct vfsmount *mnt);
61146+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
61147+void gr_log_remount(const char *devname, const int retval);
61148+void gr_log_unmount(const char *devname, const int retval);
61149+void gr_log_mount(const char *from, const char *to, const int retval);
61150+void gr_log_textrel(struct vm_area_struct *vma);
61151+void gr_log_rwxmmap(struct file *file);
61152+void gr_log_rwxmprotect(struct file *file);
61153+
61154+int gr_handle_follow_link(const struct inode *parent,
61155+ const struct inode *inode,
61156+ const struct dentry *dentry,
61157+ const struct vfsmount *mnt);
61158+int gr_handle_fifo(const struct dentry *dentry,
61159+ const struct vfsmount *mnt,
61160+ const struct dentry *dir, const int flag,
61161+ const int acc_mode);
61162+int gr_handle_hardlink(const struct dentry *dentry,
61163+ const struct vfsmount *mnt,
61164+ struct inode *inode,
61165+ const int mode, const char *to);
61166+
61167+int gr_is_capable(const int cap);
61168+int gr_is_capable_nolog(const int cap);
61169+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
61170+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
61171+
61172+void gr_learn_resource(const struct task_struct *task, const int limit,
61173+ const unsigned long wanted, const int gt);
61174+void gr_copy_label(struct task_struct *tsk);
61175+void gr_handle_crash(struct task_struct *task, const int sig);
61176+int gr_handle_signal(const struct task_struct *p, const int sig);
61177+int gr_check_crash_uid(const uid_t uid);
61178+int gr_check_protected_task(const struct task_struct *task);
61179+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
61180+int gr_acl_handle_mmap(const struct file *file,
61181+ const unsigned long prot);
61182+int gr_acl_handle_mprotect(const struct file *file,
61183+ const unsigned long prot);
61184+int gr_check_hidden_task(const struct task_struct *tsk);
61185+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
61186+ const struct vfsmount *mnt);
61187+__u32 gr_acl_handle_utime(const struct dentry *dentry,
61188+ const struct vfsmount *mnt);
61189+__u32 gr_acl_handle_access(const struct dentry *dentry,
61190+ const struct vfsmount *mnt, const int fmode);
61191+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
61192+ const struct vfsmount *mnt, umode_t *mode);
61193+__u32 gr_acl_handle_chown(const struct dentry *dentry,
61194+ const struct vfsmount *mnt);
61195+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
61196+ const struct vfsmount *mnt);
61197+int gr_handle_ptrace(struct task_struct *task, const long request);
61198+int gr_handle_proc_ptrace(struct task_struct *task);
61199+__u32 gr_acl_handle_execve(const struct dentry *dentry,
61200+ const struct vfsmount *mnt);
61201+int gr_check_crash_exec(const struct file *filp);
61202+int gr_acl_is_enabled(void);
61203+void gr_set_kernel_label(struct task_struct *task);
61204+void gr_set_role_label(struct task_struct *task, const uid_t uid,
61205+ const gid_t gid);
61206+int gr_set_proc_label(const struct dentry *dentry,
61207+ const struct vfsmount *mnt,
61208+ const int unsafe_flags);
61209+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
61210+ const struct vfsmount *mnt);
61211+__u32 gr_acl_handle_open(const struct dentry *dentry,
61212+ const struct vfsmount *mnt, int acc_mode);
61213+__u32 gr_acl_handle_creat(const struct dentry *dentry,
61214+ const struct dentry *p_dentry,
61215+ const struct vfsmount *p_mnt,
61216+ int open_flags, int acc_mode, const int imode);
61217+void gr_handle_create(const struct dentry *dentry,
61218+ const struct vfsmount *mnt);
61219+void gr_handle_proc_create(const struct dentry *dentry,
61220+ const struct inode *inode);
61221+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
61222+ const struct dentry *parent_dentry,
61223+ const struct vfsmount *parent_mnt,
61224+ const int mode);
61225+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
61226+ const struct dentry *parent_dentry,
61227+ const struct vfsmount *parent_mnt);
61228+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
61229+ const struct vfsmount *mnt);
61230+void gr_handle_delete(const ino_t ino, const dev_t dev);
61231+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
61232+ const struct vfsmount *mnt);
61233+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
61234+ const struct dentry *parent_dentry,
61235+ const struct vfsmount *parent_mnt,
61236+ const char *from);
61237+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
61238+ const struct dentry *parent_dentry,
61239+ const struct vfsmount *parent_mnt,
61240+ const struct dentry *old_dentry,
61241+ const struct vfsmount *old_mnt, const char *to);
61242+int gr_acl_handle_rename(struct dentry *new_dentry,
61243+ struct dentry *parent_dentry,
61244+ const struct vfsmount *parent_mnt,
61245+ struct dentry *old_dentry,
61246+ struct inode *old_parent_inode,
61247+ struct vfsmount *old_mnt, const char *newname);
61248+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61249+ struct dentry *old_dentry,
61250+ struct dentry *new_dentry,
61251+ struct vfsmount *mnt, const __u8 replace);
61252+__u32 gr_check_link(const struct dentry *new_dentry,
61253+ const struct dentry *parent_dentry,
61254+ const struct vfsmount *parent_mnt,
61255+ const struct dentry *old_dentry,
61256+ const struct vfsmount *old_mnt);
61257+int gr_acl_handle_filldir(const struct file *file, const char *name,
61258+ const unsigned int namelen, const ino_t ino);
61259+
61260+__u32 gr_acl_handle_unix(const struct dentry *dentry,
61261+ const struct vfsmount *mnt);
61262+void gr_acl_handle_exit(void);
61263+void gr_acl_handle_psacct(struct task_struct *task, const long code);
61264+int gr_acl_handle_procpidmem(const struct task_struct *task);
61265+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
61266+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
61267+void gr_audit_ptrace(struct task_struct *task);
61268+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
61269+
61270+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
61271+
61272+#ifdef CONFIG_GRKERNSEC
61273+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
61274+void gr_handle_vm86(void);
61275+void gr_handle_mem_readwrite(u64 from, u64 to);
61276+
61277+void gr_log_badprocpid(const char *entry);
61278+
61279+extern int grsec_enable_dmesg;
61280+extern int grsec_disable_privio;
61281+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61282+extern int grsec_enable_chroot_findtask;
61283+#endif
61284+#ifdef CONFIG_GRKERNSEC_SETXID
61285+extern int grsec_enable_setxid;
61286+#endif
61287+#endif
61288+
61289+#endif
61290diff --git a/include/linux/grsock.h b/include/linux/grsock.h
61291new file mode 100644
61292index 0000000..e7ffaaf
61293--- /dev/null
61294+++ b/include/linux/grsock.h
61295@@ -0,0 +1,19 @@
61296+#ifndef __GRSOCK_H
61297+#define __GRSOCK_H
61298+
61299+extern void gr_attach_curr_ip(const struct sock *sk);
61300+extern int gr_handle_sock_all(const int family, const int type,
61301+ const int protocol);
61302+extern int gr_handle_sock_server(const struct sockaddr *sck);
61303+extern int gr_handle_sock_server_other(const struct sock *sck);
61304+extern int gr_handle_sock_client(const struct sockaddr *sck);
61305+extern int gr_search_connect(struct socket * sock,
61306+ struct sockaddr_in * addr);
61307+extern int gr_search_bind(struct socket * sock,
61308+ struct sockaddr_in * addr);
61309+extern int gr_search_listen(struct socket * sock);
61310+extern int gr_search_accept(struct socket * sock);
61311+extern int gr_search_socket(const int domain, const int type,
61312+ const int protocol);
61313+
61314+#endif
61315diff --git a/include/linux/hid.h b/include/linux/hid.h
61316index 3a95da6..51986f1 100644
61317--- a/include/linux/hid.h
61318+++ b/include/linux/hid.h
61319@@ -696,7 +696,7 @@ struct hid_ll_driver {
61320 unsigned int code, int value);
61321
61322 int (*parse)(struct hid_device *hdev);
61323-};
61324+} __no_const;
61325
61326 #define PM_HINT_FULLON 1<<5
61327 #define PM_HINT_NORMAL 1<<1
61328diff --git a/include/linux/highmem.h b/include/linux/highmem.h
61329index d3999b4..1304cb4 100644
61330--- a/include/linux/highmem.h
61331+++ b/include/linux/highmem.h
61332@@ -221,6 +221,18 @@ static inline void clear_highpage(struct page *page)
61333 kunmap_atomic(kaddr);
61334 }
61335
61336+static inline void sanitize_highpage(struct page *page)
61337+{
61338+ void *kaddr;
61339+ unsigned long flags;
61340+
61341+ local_irq_save(flags);
61342+ kaddr = kmap_atomic(page);
61343+ clear_page(kaddr);
61344+ kunmap_atomic(kaddr);
61345+ local_irq_restore(flags);
61346+}
61347+
61348 static inline void zero_user_segments(struct page *page,
61349 unsigned start1, unsigned end1,
61350 unsigned start2, unsigned end2)
61351diff --git a/include/linux/i2c.h b/include/linux/i2c.h
61352index 195d8b3..e20cfab 100644
61353--- a/include/linux/i2c.h
61354+++ b/include/linux/i2c.h
61355@@ -365,6 +365,7 @@ struct i2c_algorithm {
61356 /* To determine what the adapter supports */
61357 u32 (*functionality) (struct i2c_adapter *);
61358 };
61359+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
61360
61361 /*
61362 * i2c_adapter is the structure used to identify a physical i2c bus along
61363diff --git a/include/linux/i2o.h b/include/linux/i2o.h
61364index d23c3c2..eb63c81 100644
61365--- a/include/linux/i2o.h
61366+++ b/include/linux/i2o.h
61367@@ -565,7 +565,7 @@ struct i2o_controller {
61368 struct i2o_device *exec; /* Executive */
61369 #if BITS_PER_LONG == 64
61370 spinlock_t context_list_lock; /* lock for context_list */
61371- atomic_t context_list_counter; /* needed for unique contexts */
61372+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
61373 struct list_head context_list; /* list of context id's
61374 and pointers */
61375 #endif
61376diff --git a/include/linux/if_team.h b/include/linux/if_team.h
61377index 58404b0..439ed95 100644
61378--- a/include/linux/if_team.h
61379+++ b/include/linux/if_team.h
61380@@ -64,6 +64,7 @@ struct team_mode_ops {
61381 void (*port_leave)(struct team *team, struct team_port *port);
61382 void (*port_change_mac)(struct team *team, struct team_port *port);
61383 };
61384+typedef struct team_mode_ops __no_const team_mode_ops_no_const;
61385
61386 enum team_option_type {
61387 TEAM_OPTION_TYPE_U32,
61388@@ -112,7 +113,7 @@ struct team {
61389 struct list_head option_list;
61390
61391 const struct team_mode *mode;
61392- struct team_mode_ops ops;
61393+ team_mode_ops_no_const ops;
61394 long mode_priv[TEAM_MODE_PRIV_LONGS];
61395 };
61396
61397diff --git a/include/linux/init.h b/include/linux/init.h
61398index 6b95109..4aca62c 100644
61399--- a/include/linux/init.h
61400+++ b/include/linux/init.h
61401@@ -294,13 +294,13 @@ void __init parse_early_options(char *cmdline);
61402
61403 /* Each module must use one module_init(). */
61404 #define module_init(initfn) \
61405- static inline initcall_t __inittest(void) \
61406+ static inline __used initcall_t __inittest(void) \
61407 { return initfn; } \
61408 int init_module(void) __attribute__((alias(#initfn)));
61409
61410 /* This is only required if you want to be unloadable. */
61411 #define module_exit(exitfn) \
61412- static inline exitcall_t __exittest(void) \
61413+ static inline __used exitcall_t __exittest(void) \
61414 { return exitfn; } \
61415 void cleanup_module(void) __attribute__((alias(#exitfn)));
61416
61417diff --git a/include/linux/init_task.h b/include/linux/init_task.h
61418index e4baff5..83bb175 100644
61419--- a/include/linux/init_task.h
61420+++ b/include/linux/init_task.h
61421@@ -134,6 +134,12 @@ extern struct cred init_cred;
61422
61423 #define INIT_TASK_COMM "swapper"
61424
61425+#ifdef CONFIG_X86
61426+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
61427+#else
61428+#define INIT_TASK_THREAD_INFO
61429+#endif
61430+
61431 /*
61432 * INIT_TASK is used to set up the first task table, touch at
61433 * your own risk!. Base=0, limit=0x1fffff (=2MB)
61434@@ -172,6 +178,7 @@ extern struct cred init_cred;
61435 RCU_INIT_POINTER(.cred, &init_cred), \
61436 .comm = INIT_TASK_COMM, \
61437 .thread = INIT_THREAD, \
61438+ INIT_TASK_THREAD_INFO \
61439 .fs = &init_fs, \
61440 .files = &init_files, \
61441 .signal = &init_signals, \
61442diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
61443index e6ca56d..8583707 100644
61444--- a/include/linux/intel-iommu.h
61445+++ b/include/linux/intel-iommu.h
61446@@ -296,7 +296,7 @@ struct iommu_flush {
61447 u8 fm, u64 type);
61448 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
61449 unsigned int size_order, u64 type);
61450-};
61451+} __no_const;
61452
61453 enum {
61454 SR_DMAR_FECTL_REG,
61455diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
61456index 2aea5d2..0b82f0c 100644
61457--- a/include/linux/interrupt.h
61458+++ b/include/linux/interrupt.h
61459@@ -439,7 +439,7 @@ enum
61460 /* map softirq index to softirq name. update 'softirq_to_name' in
61461 * kernel/softirq.c when adding a new softirq.
61462 */
61463-extern char *softirq_to_name[NR_SOFTIRQS];
61464+extern const char * const softirq_to_name[NR_SOFTIRQS];
61465
61466 /* softirq mask and active fields moved to irq_cpustat_t in
61467 * asm/hardirq.h to get better cache usage. KAO
61468@@ -447,12 +447,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
61469
61470 struct softirq_action
61471 {
61472- void (*action)(struct softirq_action *);
61473+ void (*action)(void);
61474 };
61475
61476 asmlinkage void do_softirq(void);
61477 asmlinkage void __do_softirq(void);
61478-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
61479+extern void open_softirq(int nr, void (*action)(void));
61480 extern void softirq_init(void);
61481 extern void __raise_softirq_irqoff(unsigned int nr);
61482
61483diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
61484index 3875719..4cd454c 100644
61485--- a/include/linux/kallsyms.h
61486+++ b/include/linux/kallsyms.h
61487@@ -15,7 +15,8 @@
61488
61489 struct module;
61490
61491-#ifdef CONFIG_KALLSYMS
61492+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
61493+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
61494 /* Lookup the address for a symbol. Returns 0 if not found. */
61495 unsigned long kallsyms_lookup_name(const char *name);
61496
61497@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
61498 /* Stupid that this does nothing, but I didn't create this mess. */
61499 #define __print_symbol(fmt, addr)
61500 #endif /*CONFIG_KALLSYMS*/
61501+#else /* when included by kallsyms.c, vsnprintf.c, or
61502+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
61503+extern void __print_symbol(const char *fmt, unsigned long address);
61504+extern int sprint_backtrace(char *buffer, unsigned long address);
61505+extern int sprint_symbol(char *buffer, unsigned long address);
61506+const char *kallsyms_lookup(unsigned long addr,
61507+ unsigned long *symbolsize,
61508+ unsigned long *offset,
61509+ char **modname, char *namebuf);
61510+#endif
61511
61512 /* This macro allows us to keep printk typechecking */
61513 static __printf(1, 2)
61514diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
61515index c4d2fc1..5df9c19 100644
61516--- a/include/linux/kgdb.h
61517+++ b/include/linux/kgdb.h
61518@@ -53,7 +53,7 @@ extern int kgdb_connected;
61519 extern int kgdb_io_module_registered;
61520
61521 extern atomic_t kgdb_setting_breakpoint;
61522-extern atomic_t kgdb_cpu_doing_single_step;
61523+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
61524
61525 extern struct task_struct *kgdb_usethread;
61526 extern struct task_struct *kgdb_contthread;
61527@@ -252,7 +252,7 @@ struct kgdb_arch {
61528 void (*disable_hw_break)(struct pt_regs *regs);
61529 void (*remove_all_hw_break)(void);
61530 void (*correct_hw_break)(void);
61531-};
61532+} __do_const;
61533
61534 /**
61535 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
61536@@ -277,7 +277,7 @@ struct kgdb_io {
61537 void (*pre_exception) (void);
61538 void (*post_exception) (void);
61539 int is_console;
61540-};
61541+} __do_const;
61542
61543 extern struct kgdb_arch arch_kgdb_ops;
61544
61545diff --git a/include/linux/kmod.h b/include/linux/kmod.h
61546index dd99c32..da06047 100644
61547--- a/include/linux/kmod.h
61548+++ b/include/linux/kmod.h
61549@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
61550 * usually useless though. */
61551 extern __printf(2, 3)
61552 int __request_module(bool wait, const char *name, ...);
61553+extern __printf(3, 4)
61554+int ___request_module(bool wait, char *param_name, const char *name, ...);
61555 #define request_module(mod...) __request_module(true, mod)
61556 #define request_module_nowait(mod...) __request_module(false, mod)
61557 #define try_then_request_module(x, mod...) \
61558diff --git a/include/linux/kref.h b/include/linux/kref.h
61559index 9c07dce..a92fa71 100644
61560--- a/include/linux/kref.h
61561+++ b/include/linux/kref.h
61562@@ -63,7 +63,7 @@ static inline void kref_get(struct kref *kref)
61563 static inline int kref_sub(struct kref *kref, unsigned int count,
61564 void (*release)(struct kref *kref))
61565 {
61566- WARN_ON(release == NULL);
61567+ BUG_ON(release == NULL);
61568
61569 if (atomic_sub_and_test((int) count, &kref->refcount)) {
61570 release(kref);
61571diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
61572index 72cbf08..dd0201d 100644
61573--- a/include/linux/kvm_host.h
61574+++ b/include/linux/kvm_host.h
61575@@ -322,7 +322,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
61576 void vcpu_load(struct kvm_vcpu *vcpu);
61577 void vcpu_put(struct kvm_vcpu *vcpu);
61578
61579-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61580+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61581 struct module *module);
61582 void kvm_exit(void);
61583
61584@@ -486,7 +486,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
61585 struct kvm_guest_debug *dbg);
61586 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
61587
61588-int kvm_arch_init(void *opaque);
61589+int kvm_arch_init(const void *opaque);
61590 void kvm_arch_exit(void);
61591
61592 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
61593diff --git a/include/linux/libata.h b/include/linux/libata.h
61594index 6e887c7..4539601 100644
61595--- a/include/linux/libata.h
61596+++ b/include/linux/libata.h
61597@@ -910,7 +910,7 @@ struct ata_port_operations {
61598 * fields must be pointers.
61599 */
61600 const struct ata_port_operations *inherits;
61601-};
61602+} __do_const;
61603
61604 struct ata_port_info {
61605 unsigned long flags;
61606diff --git a/include/linux/mca.h b/include/linux/mca.h
61607index 3797270..7765ede 100644
61608--- a/include/linux/mca.h
61609+++ b/include/linux/mca.h
61610@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
61611 int region);
61612 void * (*mca_transform_memory)(struct mca_device *,
61613 void *memory);
61614-};
61615+} __no_const;
61616
61617 struct mca_bus {
61618 u64 default_dma_mask;
61619diff --git a/include/linux/memory.h b/include/linux/memory.h
61620index 1ac7f6e..a5794d0 100644
61621--- a/include/linux/memory.h
61622+++ b/include/linux/memory.h
61623@@ -143,7 +143,7 @@ struct memory_accessor {
61624 size_t count);
61625 ssize_t (*write)(struct memory_accessor *, const char *buf,
61626 off_t offset, size_t count);
61627-};
61628+} __no_const;
61629
61630 /*
61631 * Kernel text modification mutex, used for code patching. Users of this lock
61632diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
61633index ee96cd5..7823c3a 100644
61634--- a/include/linux/mfd/abx500.h
61635+++ b/include/linux/mfd/abx500.h
61636@@ -455,6 +455,7 @@ struct abx500_ops {
61637 int (*event_registers_startup_state_get) (struct device *, u8 *);
61638 int (*startup_irq_enabled) (struct device *, unsigned int);
61639 };
61640+typedef struct abx500_ops __no_const abx500_ops_no_const;
61641
61642 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
61643 void abx500_remove_ops(struct device *dev);
61644diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
61645index 9b07725..3d55001 100644
61646--- a/include/linux/mfd/abx500/ux500_chargalg.h
61647+++ b/include/linux/mfd/abx500/ux500_chargalg.h
61648@@ -19,7 +19,7 @@ struct ux500_charger_ops {
61649 int (*enable) (struct ux500_charger *, int, int, int);
61650 int (*kick_wd) (struct ux500_charger *);
61651 int (*update_curr) (struct ux500_charger *, int);
61652-};
61653+} __no_const;
61654
61655 /**
61656 * struct ux500_charger - power supply ux500 charger sub class
61657diff --git a/include/linux/mm.h b/include/linux/mm.h
61658index 74aa71b..4ae97ba 100644
61659--- a/include/linux/mm.h
61660+++ b/include/linux/mm.h
61661@@ -116,7 +116,14 @@ extern unsigned int kobjsize(const void *objp);
61662
61663 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
61664 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
61665+
61666+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61667+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
61668+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
61669+#else
61670 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
61671+#endif
61672+
61673 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
61674 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
61675
61676@@ -1013,34 +1020,6 @@ int set_page_dirty(struct page *page);
61677 int set_page_dirty_lock(struct page *page);
61678 int clear_page_dirty_for_io(struct page *page);
61679
61680-/* Is the vma a continuation of the stack vma above it? */
61681-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
61682-{
61683- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
61684-}
61685-
61686-static inline int stack_guard_page_start(struct vm_area_struct *vma,
61687- unsigned long addr)
61688-{
61689- return (vma->vm_flags & VM_GROWSDOWN) &&
61690- (vma->vm_start == addr) &&
61691- !vma_growsdown(vma->vm_prev, addr);
61692-}
61693-
61694-/* Is the vma a continuation of the stack vma below it? */
61695-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
61696-{
61697- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
61698-}
61699-
61700-static inline int stack_guard_page_end(struct vm_area_struct *vma,
61701- unsigned long addr)
61702-{
61703- return (vma->vm_flags & VM_GROWSUP) &&
61704- (vma->vm_end == addr) &&
61705- !vma_growsup(vma->vm_next, addr);
61706-}
61707-
61708 extern pid_t
61709 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
61710
61711@@ -1139,6 +1118,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
61712 }
61713 #endif
61714
61715+#ifdef CONFIG_MMU
61716+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
61717+#else
61718+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
61719+{
61720+ return __pgprot(0);
61721+}
61722+#endif
61723+
61724 int vma_wants_writenotify(struct vm_area_struct *vma);
61725
61726 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
61727@@ -1157,8 +1145,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
61728 {
61729 return 0;
61730 }
61731+
61732+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
61733+ unsigned long address)
61734+{
61735+ return 0;
61736+}
61737 #else
61738 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
61739+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
61740 #endif
61741
61742 #ifdef __PAGETABLE_PMD_FOLDED
61743@@ -1167,8 +1162,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
61744 {
61745 return 0;
61746 }
61747+
61748+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
61749+ unsigned long address)
61750+{
61751+ return 0;
61752+}
61753 #else
61754 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
61755+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
61756 #endif
61757
61758 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
61759@@ -1186,11 +1188,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
61760 NULL: pud_offset(pgd, address);
61761 }
61762
61763+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
61764+{
61765+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
61766+ NULL: pud_offset(pgd, address);
61767+}
61768+
61769 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
61770 {
61771 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
61772 NULL: pmd_offset(pud, address);
61773 }
61774+
61775+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
61776+{
61777+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
61778+ NULL: pmd_offset(pud, address);
61779+}
61780 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
61781
61782 #if USE_SPLIT_PTLOCKS
61783@@ -1400,6 +1414,7 @@ extern unsigned long do_mmap(struct file *, unsigned long,
61784 unsigned long, unsigned long,
61785 unsigned long, unsigned long);
61786 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
61787+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
61788
61789 /* These take the mm semaphore themselves */
61790 extern unsigned long vm_brk(unsigned long, unsigned long);
61791@@ -1462,6 +1477,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
61792 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
61793 struct vm_area_struct **pprev);
61794
61795+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
61796+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
61797+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
61798+
61799 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
61800 NULL if none. Assume start_addr < end_addr. */
61801 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
61802@@ -1490,15 +1509,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
61803 return vma;
61804 }
61805
61806-#ifdef CONFIG_MMU
61807-pgprot_t vm_get_page_prot(unsigned long vm_flags);
61808-#else
61809-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
61810-{
61811- return __pgprot(0);
61812-}
61813-#endif
61814-
61815 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
61816 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
61817 unsigned long pfn, unsigned long size, pgprot_t);
61818@@ -1602,7 +1612,7 @@ extern int unpoison_memory(unsigned long pfn);
61819 extern int sysctl_memory_failure_early_kill;
61820 extern int sysctl_memory_failure_recovery;
61821 extern void shake_page(struct page *p, int access);
61822-extern atomic_long_t mce_bad_pages;
61823+extern atomic_long_unchecked_t mce_bad_pages;
61824 extern int soft_offline_page(struct page *page, int flags);
61825
61826 extern void dump_page(struct page *page);
61827@@ -1633,5 +1643,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
61828 static inline bool page_is_guard(struct page *page) { return false; }
61829 #endif /* CONFIG_DEBUG_PAGEALLOC */
61830
61831+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
61832+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
61833+#else
61834+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
61835+#endif
61836+
61837 #endif /* __KERNEL__ */
61838 #endif /* _LINUX_MM_H */
61839diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
61840index 3cc3062..efeaeb7 100644
61841--- a/include/linux/mm_types.h
61842+++ b/include/linux/mm_types.h
61843@@ -252,6 +252,8 @@ struct vm_area_struct {
61844 #ifdef CONFIG_NUMA
61845 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
61846 #endif
61847+
61848+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
61849 };
61850
61851 struct core_thread {
61852@@ -326,7 +328,7 @@ struct mm_struct {
61853 unsigned long def_flags;
61854 unsigned long nr_ptes; /* Page table pages */
61855 unsigned long start_code, end_code, start_data, end_data;
61856- unsigned long start_brk, brk, start_stack;
61857+ unsigned long brk_gap, start_brk, brk, start_stack;
61858 unsigned long arg_start, arg_end, env_start, env_end;
61859
61860 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
61861@@ -388,6 +390,24 @@ struct mm_struct {
61862 #ifdef CONFIG_CPUMASK_OFFSTACK
61863 struct cpumask cpumask_allocation;
61864 #endif
61865+
61866+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS) || defined(CONFIG_PAX_HAVE_ACL_FLAGS) || defined(CONFIG_PAX_HOOK_ACL_FLAGS)
61867+ unsigned long pax_flags;
61868+#endif
61869+
61870+#ifdef CONFIG_PAX_DLRESOLVE
61871+ unsigned long call_dl_resolve;
61872+#endif
61873+
61874+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
61875+ unsigned long call_syscall;
61876+#endif
61877+
61878+#ifdef CONFIG_PAX_ASLR
61879+ unsigned long delta_mmap; /* randomized offset */
61880+ unsigned long delta_stack; /* randomized offset */
61881+#endif
61882+
61883 };
61884
61885 static inline void mm_init_cpumask(struct mm_struct *mm)
61886diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
61887index 1d1b1e1..2a13c78 100644
61888--- a/include/linux/mmu_notifier.h
61889+++ b/include/linux/mmu_notifier.h
61890@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
61891 */
61892 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
61893 ({ \
61894- pte_t __pte; \
61895+ pte_t ___pte; \
61896 struct vm_area_struct *___vma = __vma; \
61897 unsigned long ___address = __address; \
61898- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
61899+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
61900 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
61901- __pte; \
61902+ ___pte; \
61903 })
61904
61905 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
61906diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
61907index dff7115..0e001c8 100644
61908--- a/include/linux/mmzone.h
61909+++ b/include/linux/mmzone.h
61910@@ -380,7 +380,7 @@ struct zone {
61911 unsigned long flags; /* zone flags, see below */
61912
61913 /* Zone statistics */
61914- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61915+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61916
61917 /*
61918 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
61919diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
61920index 501da4c..ba79bb4 100644
61921--- a/include/linux/mod_devicetable.h
61922+++ b/include/linux/mod_devicetable.h
61923@@ -12,7 +12,7 @@
61924 typedef unsigned long kernel_ulong_t;
61925 #endif
61926
61927-#define PCI_ANY_ID (~0)
61928+#define PCI_ANY_ID ((__u16)~0)
61929
61930 struct pci_device_id {
61931 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
61932@@ -131,7 +131,7 @@ struct usb_device_id {
61933 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
61934 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
61935
61936-#define HID_ANY_ID (~0)
61937+#define HID_ANY_ID (~0U)
61938
61939 struct hid_device_id {
61940 __u16 bus;
61941diff --git a/include/linux/module.h b/include/linux/module.h
61942index fbcafe2..e5d9587 100644
61943--- a/include/linux/module.h
61944+++ b/include/linux/module.h
61945@@ -17,6 +17,7 @@
61946 #include <linux/moduleparam.h>
61947 #include <linux/tracepoint.h>
61948 #include <linux/export.h>
61949+#include <linux/fs.h>
61950
61951 #include <linux/percpu.h>
61952 #include <asm/module.h>
61953@@ -273,19 +274,16 @@ struct module
61954 int (*init)(void);
61955
61956 /* If this is non-NULL, vfree after init() returns */
61957- void *module_init;
61958+ void *module_init_rx, *module_init_rw;
61959
61960 /* Here is the actual code + data, vfree'd on unload. */
61961- void *module_core;
61962+ void *module_core_rx, *module_core_rw;
61963
61964 /* Here are the sizes of the init and core sections */
61965- unsigned int init_size, core_size;
61966+ unsigned int init_size_rw, core_size_rw;
61967
61968 /* The size of the executable code in each section. */
61969- unsigned int init_text_size, core_text_size;
61970-
61971- /* Size of RO sections of the module (text+rodata) */
61972- unsigned int init_ro_size, core_ro_size;
61973+ unsigned int init_size_rx, core_size_rx;
61974
61975 /* Arch-specific module values */
61976 struct mod_arch_specific arch;
61977@@ -341,6 +339,10 @@ struct module
61978 #ifdef CONFIG_EVENT_TRACING
61979 struct ftrace_event_call **trace_events;
61980 unsigned int num_trace_events;
61981+ struct file_operations trace_id;
61982+ struct file_operations trace_enable;
61983+ struct file_operations trace_format;
61984+ struct file_operations trace_filter;
61985 #endif
61986 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
61987 unsigned int num_ftrace_callsites;
61988@@ -388,16 +390,46 @@ bool is_module_address(unsigned long addr);
61989 bool is_module_percpu_address(unsigned long addr);
61990 bool is_module_text_address(unsigned long addr);
61991
61992+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
61993+{
61994+
61995+#ifdef CONFIG_PAX_KERNEXEC
61996+ if (ktla_ktva(addr) >= (unsigned long)start &&
61997+ ktla_ktva(addr) < (unsigned long)start + size)
61998+ return 1;
61999+#endif
62000+
62001+ return ((void *)addr >= start && (void *)addr < start + size);
62002+}
62003+
62004+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
62005+{
62006+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
62007+}
62008+
62009+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
62010+{
62011+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
62012+}
62013+
62014+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
62015+{
62016+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
62017+}
62018+
62019+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
62020+{
62021+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
62022+}
62023+
62024 static inline int within_module_core(unsigned long addr, struct module *mod)
62025 {
62026- return (unsigned long)mod->module_core <= addr &&
62027- addr < (unsigned long)mod->module_core + mod->core_size;
62028+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
62029 }
62030
62031 static inline int within_module_init(unsigned long addr, struct module *mod)
62032 {
62033- return (unsigned long)mod->module_init <= addr &&
62034- addr < (unsigned long)mod->module_init + mod->init_size;
62035+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
62036 }
62037
62038 /* Search for module by name: must hold module_mutex. */
62039diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
62040index b2be02e..72d2f78 100644
62041--- a/include/linux/moduleloader.h
62042+++ b/include/linux/moduleloader.h
62043@@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
62044
62045 /* Allocator used for allocating struct module, core sections and init
62046 sections. Returns NULL on failure. */
62047-void *module_alloc(unsigned long size);
62048+void *module_alloc(unsigned long size) __size_overflow(1);
62049+
62050+#ifdef CONFIG_PAX_KERNEXEC
62051+void *module_alloc_exec(unsigned long size) __size_overflow(1);
62052+#else
62053+#define module_alloc_exec(x) module_alloc(x)
62054+#endif
62055
62056 /* Free memory returned from module_alloc. */
62057 void module_free(struct module *mod, void *module_region);
62058
62059+#ifdef CONFIG_PAX_KERNEXEC
62060+void module_free_exec(struct module *mod, void *module_region);
62061+#else
62062+#define module_free_exec(x, y) module_free((x), (y))
62063+#endif
62064+
62065 /* Apply the given relocation to the (simplified) ELF. Return -error
62066 or 0. */
62067 int apply_relocate(Elf_Shdr *sechdrs,
62068diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
62069index 944bc18..042d291 100644
62070--- a/include/linux/moduleparam.h
62071+++ b/include/linux/moduleparam.h
62072@@ -286,7 +286,7 @@ static inline void __kernel_param_unlock(void)
62073 * @len is usually just sizeof(string).
62074 */
62075 #define module_param_string(name, string, len, perm) \
62076- static const struct kparam_string __param_string_##name \
62077+ static const struct kparam_string __param_string_##name __used \
62078 = { len, string }; \
62079 __module_param_call(MODULE_PARAM_PREFIX, name, \
62080 &param_ops_string, \
62081@@ -424,7 +424,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
62082 */
62083 #define module_param_array_named(name, array, type, nump, perm) \
62084 param_check_##type(name, &(array)[0]); \
62085- static const struct kparam_array __param_arr_##name \
62086+ static const struct kparam_array __param_arr_##name __used \
62087 = { .max = ARRAY_SIZE(array), .num = nump, \
62088 .ops = &param_ops_##type, \
62089 .elemsize = sizeof(array[0]), .elem = array }; \
62090diff --git a/include/linux/namei.h b/include/linux/namei.h
62091index ffc0213..2c1f2cb 100644
62092--- a/include/linux/namei.h
62093+++ b/include/linux/namei.h
62094@@ -24,7 +24,7 @@ struct nameidata {
62095 unsigned seq;
62096 int last_type;
62097 unsigned depth;
62098- char *saved_names[MAX_NESTED_LINKS + 1];
62099+ const char *saved_names[MAX_NESTED_LINKS + 1];
62100
62101 /* Intent data */
62102 union {
62103@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
62104 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
62105 extern void unlock_rename(struct dentry *, struct dentry *);
62106
62107-static inline void nd_set_link(struct nameidata *nd, char *path)
62108+static inline void nd_set_link(struct nameidata *nd, const char *path)
62109 {
62110 nd->saved_names[nd->depth] = path;
62111 }
62112
62113-static inline char *nd_get_link(struct nameidata *nd)
62114+static inline const char *nd_get_link(const struct nameidata *nd)
62115 {
62116 return nd->saved_names[nd->depth];
62117 }
62118diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
62119index 33900a5..2072000 100644
62120--- a/include/linux/netdevice.h
62121+++ b/include/linux/netdevice.h
62122@@ -1003,6 +1003,7 @@ struct net_device_ops {
62123 int (*ndo_neigh_construct)(struct neighbour *n);
62124 void (*ndo_neigh_destroy)(struct neighbour *n);
62125 };
62126+typedef struct net_device_ops __no_const net_device_ops_no_const;
62127
62128 /*
62129 * The DEVICE structure.
62130@@ -1064,7 +1065,7 @@ struct net_device {
62131 int iflink;
62132
62133 struct net_device_stats stats;
62134- atomic_long_t rx_dropped; /* dropped packets by core network
62135+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
62136 * Do not use this in drivers.
62137 */
62138
62139diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
62140new file mode 100644
62141index 0000000..33f4af8
62142--- /dev/null
62143+++ b/include/linux/netfilter/xt_gradm.h
62144@@ -0,0 +1,9 @@
62145+#ifndef _LINUX_NETFILTER_XT_GRADM_H
62146+#define _LINUX_NETFILTER_XT_GRADM_H 1
62147+
62148+struct xt_gradm_mtinfo {
62149+ __u16 flags;
62150+ __u16 invflags;
62151+};
62152+
62153+#endif
62154diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
62155index c65a18a..0c05f3a 100644
62156--- a/include/linux/of_pdt.h
62157+++ b/include/linux/of_pdt.h
62158@@ -32,7 +32,7 @@ struct of_pdt_ops {
62159
62160 /* return 0 on success; fill in 'len' with number of bytes in path */
62161 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
62162-};
62163+} __no_const;
62164
62165 extern void *prom_early_alloc(unsigned long size);
62166
62167diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
62168index a4c5624..79d6d88 100644
62169--- a/include/linux/oprofile.h
62170+++ b/include/linux/oprofile.h
62171@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
62172 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
62173 char const * name, ulong * val);
62174
62175-/** Create a file for read-only access to an atomic_t. */
62176+/** Create a file for read-only access to an atomic_unchecked_t. */
62177 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
62178- char const * name, atomic_t * val);
62179+ char const * name, atomic_unchecked_t * val);
62180
62181 /** create a directory */
62182 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
62183diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
62184index ddbb6a9..be1680e 100644
62185--- a/include/linux/perf_event.h
62186+++ b/include/linux/perf_event.h
62187@@ -879,8 +879,8 @@ struct perf_event {
62188
62189 enum perf_event_active_state state;
62190 unsigned int attach_state;
62191- local64_t count;
62192- atomic64_t child_count;
62193+ local64_t count; /* PaX: fix it one day */
62194+ atomic64_unchecked_t child_count;
62195
62196 /*
62197 * These are the total time in nanoseconds that the event
62198@@ -931,8 +931,8 @@ struct perf_event {
62199 * These accumulate total time (in nanoseconds) that children
62200 * events have been enabled and running, respectively.
62201 */
62202- atomic64_t child_total_time_enabled;
62203- atomic64_t child_total_time_running;
62204+ atomic64_unchecked_t child_total_time_enabled;
62205+ atomic64_unchecked_t child_total_time_running;
62206
62207 /*
62208 * Protect attach/detach and child_list:
62209diff --git a/include/linux/personality.h b/include/linux/personality.h
62210index 8fc7dd1a..c19d89e 100644
62211--- a/include/linux/personality.h
62212+++ b/include/linux/personality.h
62213@@ -44,6 +44,7 @@ enum {
62214 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
62215 ADDR_NO_RANDOMIZE | \
62216 ADDR_COMPAT_LAYOUT | \
62217+ ADDR_LIMIT_3GB | \
62218 MMAP_PAGE_ZERO)
62219
62220 /*
62221diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
62222index e1ac1ce..0675fed 100644
62223--- a/include/linux/pipe_fs_i.h
62224+++ b/include/linux/pipe_fs_i.h
62225@@ -45,9 +45,9 @@ struct pipe_buffer {
62226 struct pipe_inode_info {
62227 wait_queue_head_t wait;
62228 unsigned int nrbufs, curbuf, buffers;
62229- unsigned int readers;
62230- unsigned int writers;
62231- unsigned int waiting_writers;
62232+ atomic_t readers;
62233+ atomic_t writers;
62234+ atomic_t waiting_writers;
62235 unsigned int r_counter;
62236 unsigned int w_counter;
62237 struct page *tmp_page;
62238diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
62239index 609daae..5392427 100644
62240--- a/include/linux/pm_runtime.h
62241+++ b/include/linux/pm_runtime.h
62242@@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
62243
62244 static inline void pm_runtime_mark_last_busy(struct device *dev)
62245 {
62246- ACCESS_ONCE(dev->power.last_busy) = jiffies;
62247+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
62248 }
62249
62250 #else /* !CONFIG_PM_RUNTIME */
62251diff --git a/include/linux/poison.h b/include/linux/poison.h
62252index 2110a81..13a11bb 100644
62253--- a/include/linux/poison.h
62254+++ b/include/linux/poison.h
62255@@ -19,8 +19,8 @@
62256 * under normal circumstances, used to verify that nobody uses
62257 * non-initialized list entries.
62258 */
62259-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
62260-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
62261+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
62262+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
62263
62264 /********** include/linux/timer.h **********/
62265 /*
62266diff --git a/include/linux/preempt.h b/include/linux/preempt.h
62267index 5a710b9..0b0dab9 100644
62268--- a/include/linux/preempt.h
62269+++ b/include/linux/preempt.h
62270@@ -126,7 +126,7 @@ struct preempt_ops {
62271 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
62272 void (*sched_out)(struct preempt_notifier *notifier,
62273 struct task_struct *next);
62274-};
62275+} __no_const;
62276
62277 /**
62278 * preempt_notifier - key for installing preemption notifiers
62279diff --git a/include/linux/printk.h b/include/linux/printk.h
62280index 0525927..a5388b6 100644
62281--- a/include/linux/printk.h
62282+++ b/include/linux/printk.h
62283@@ -94,6 +94,8 @@ void early_printk(const char *fmt, ...);
62284 extern int printk_needs_cpu(int cpu);
62285 extern void printk_tick(void);
62286
62287+extern int kptr_restrict;
62288+
62289 #ifdef CONFIG_PRINTK
62290 asmlinkage __printf(1, 0)
62291 int vprintk(const char *fmt, va_list args);
62292@@ -117,7 +119,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
62293
62294 extern int printk_delay_msec;
62295 extern int dmesg_restrict;
62296-extern int kptr_restrict;
62297
62298 void log_buf_kexec_setup(void);
62299 void __init setup_log_buf(int early);
62300diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
62301index 85c5073..51fac8b 100644
62302--- a/include/linux/proc_fs.h
62303+++ b/include/linux/proc_fs.h
62304@@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
62305 return proc_create_data(name, mode, parent, proc_fops, NULL);
62306 }
62307
62308+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
62309+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
62310+{
62311+#ifdef CONFIG_GRKERNSEC_PROC_USER
62312+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
62313+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62314+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
62315+#else
62316+ return proc_create_data(name, mode, parent, proc_fops, NULL);
62317+#endif
62318+}
62319+
62320 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
62321 umode_t mode, struct proc_dir_entry *base,
62322 read_proc_t *read_proc, void * data)
62323@@ -258,7 +270,7 @@ union proc_op {
62324 int (*proc_show)(struct seq_file *m,
62325 struct pid_namespace *ns, struct pid *pid,
62326 struct task_struct *task);
62327-};
62328+} __no_const;
62329
62330 struct ctl_table_header;
62331 struct ctl_table;
62332diff --git a/include/linux/random.h b/include/linux/random.h
62333index 8f74538..02a1012 100644
62334--- a/include/linux/random.h
62335+++ b/include/linux/random.h
62336@@ -69,12 +69,17 @@ void srandom32(u32 seed);
62337
62338 u32 prandom32(struct rnd_state *);
62339
62340+static inline unsigned long pax_get_random_long(void)
62341+{
62342+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
62343+}
62344+
62345 /*
62346 * Handle minimum values for seeds
62347 */
62348 static inline u32 __seed(u32 x, u32 m)
62349 {
62350- return (x < m) ? x + m : x;
62351+ return (x <= m) ? x + m + 1 : x;
62352 }
62353
62354 /**
62355diff --git a/include/linux/reboot.h b/include/linux/reboot.h
62356index e0879a7..a12f962 100644
62357--- a/include/linux/reboot.h
62358+++ b/include/linux/reboot.h
62359@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
62360 * Architecture-specific implementations of sys_reboot commands.
62361 */
62362
62363-extern void machine_restart(char *cmd);
62364-extern void machine_halt(void);
62365-extern void machine_power_off(void);
62366+extern void machine_restart(char *cmd) __noreturn;
62367+extern void machine_halt(void) __noreturn;
62368+extern void machine_power_off(void) __noreturn;
62369
62370 extern void machine_shutdown(void);
62371 struct pt_regs;
62372@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
62373 */
62374
62375 extern void kernel_restart_prepare(char *cmd);
62376-extern void kernel_restart(char *cmd);
62377-extern void kernel_halt(void);
62378-extern void kernel_power_off(void);
62379+extern void kernel_restart(char *cmd) __noreturn;
62380+extern void kernel_halt(void) __noreturn;
62381+extern void kernel_power_off(void) __noreturn;
62382
62383 extern int C_A_D; /* for sysctl */
62384 void ctrl_alt_del(void);
62385@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
62386 * Emergency restart, callable from an interrupt handler.
62387 */
62388
62389-extern void emergency_restart(void);
62390+extern void emergency_restart(void) __noreturn;
62391 #include <asm/emergency-restart.h>
62392
62393 #endif
62394diff --git a/include/linux/relay.h b/include/linux/relay.h
62395index 91cacc3..b55ff74 100644
62396--- a/include/linux/relay.h
62397+++ b/include/linux/relay.h
62398@@ -160,7 +160,7 @@ struct rchan_callbacks
62399 * The callback should return 0 if successful, negative if not.
62400 */
62401 int (*remove_buf_file)(struct dentry *dentry);
62402-};
62403+} __no_const;
62404
62405 /*
62406 * CONFIG_RELAY kernel API, kernel/relay.c
62407diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
62408index 6fdf027..ff72610 100644
62409--- a/include/linux/rfkill.h
62410+++ b/include/linux/rfkill.h
62411@@ -147,6 +147,7 @@ struct rfkill_ops {
62412 void (*query)(struct rfkill *rfkill, void *data);
62413 int (*set_block)(void *data, bool blocked);
62414 };
62415+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
62416
62417 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
62418 /**
62419diff --git a/include/linux/rio.h b/include/linux/rio.h
62420index 4d50611..c6858a2 100644
62421--- a/include/linux/rio.h
62422+++ b/include/linux/rio.h
62423@@ -315,7 +315,7 @@ struct rio_ops {
62424 int mbox, void *buffer, size_t len);
62425 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
62426 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
62427-};
62428+} __no_const;
62429
62430 #define RIO_RESOURCE_MEM 0x00000100
62431 #define RIO_RESOURCE_DOORBELL 0x00000200
62432diff --git a/include/linux/rmap.h b/include/linux/rmap.h
62433index fd07c45..4676b8e 100644
62434--- a/include/linux/rmap.h
62435+++ b/include/linux/rmap.h
62436@@ -119,9 +119,9 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
62437 void anon_vma_init(void); /* create anon_vma_cachep */
62438 int anon_vma_prepare(struct vm_area_struct *);
62439 void unlink_anon_vmas(struct vm_area_struct *);
62440-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
62441+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
62442 void anon_vma_moveto_tail(struct vm_area_struct *);
62443-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
62444+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
62445
62446 static inline void anon_vma_merge(struct vm_area_struct *vma,
62447 struct vm_area_struct *next)
62448diff --git a/include/linux/sched.h b/include/linux/sched.h
62449index 81a173c..85ccd8f 100644
62450--- a/include/linux/sched.h
62451+++ b/include/linux/sched.h
62452@@ -100,6 +100,7 @@ struct bio_list;
62453 struct fs_struct;
62454 struct perf_event_context;
62455 struct blk_plug;
62456+struct linux_binprm;
62457
62458 /*
62459 * List of flags we want to share for kernel threads,
62460@@ -382,10 +383,13 @@ struct user_namespace;
62461 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
62462
62463 extern int sysctl_max_map_count;
62464+extern unsigned long sysctl_heap_stack_gap;
62465
62466 #include <linux/aio.h>
62467
62468 #ifdef CONFIG_MMU
62469+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
62470+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
62471 extern void arch_pick_mmap_layout(struct mm_struct *mm);
62472 extern unsigned long
62473 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
62474@@ -643,6 +647,17 @@ struct signal_struct {
62475 #ifdef CONFIG_TASKSTATS
62476 struct taskstats *stats;
62477 #endif
62478+
62479+#ifdef CONFIG_GRKERNSEC
62480+ u32 curr_ip;
62481+ u32 saved_ip;
62482+ u32 gr_saddr;
62483+ u32 gr_daddr;
62484+ u16 gr_sport;
62485+ u16 gr_dport;
62486+ u8 used_accept:1;
62487+#endif
62488+
62489 #ifdef CONFIG_AUDIT
62490 unsigned audit_tty;
62491 struct tty_audit_buf *tty_audit_buf;
62492@@ -726,6 +741,11 @@ struct user_struct {
62493 struct key *session_keyring; /* UID's default session keyring */
62494 #endif
62495
62496+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
62497+ unsigned int banned;
62498+ unsigned long ban_expires;
62499+#endif
62500+
62501 /* Hash table maintenance information */
62502 struct hlist_node uidhash_node;
62503 uid_t uid;
62504@@ -1386,8 +1406,8 @@ struct task_struct {
62505 struct list_head thread_group;
62506
62507 struct completion *vfork_done; /* for vfork() */
62508- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
62509- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
62510+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
62511+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
62512
62513 cputime_t utime, stime, utimescaled, stimescaled;
62514 cputime_t gtime;
62515@@ -1403,13 +1423,6 @@ struct task_struct {
62516 struct task_cputime cputime_expires;
62517 struct list_head cpu_timers[3];
62518
62519-/* process credentials */
62520- const struct cred __rcu *real_cred; /* objective and real subjective task
62521- * credentials (COW) */
62522- const struct cred __rcu *cred; /* effective (overridable) subjective task
62523- * credentials (COW) */
62524- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62525-
62526 char comm[TASK_COMM_LEN]; /* executable name excluding path
62527 - access with [gs]et_task_comm (which lock
62528 it with task_lock())
62529@@ -1426,8 +1439,16 @@ struct task_struct {
62530 #endif
62531 /* CPU-specific state of this task */
62532 struct thread_struct thread;
62533+/* thread_info moved to task_struct */
62534+#ifdef CONFIG_X86
62535+ struct thread_info tinfo;
62536+#endif
62537 /* filesystem information */
62538 struct fs_struct *fs;
62539+
62540+ const struct cred __rcu *cred; /* effective (overridable) subjective task
62541+ * credentials (COW) */
62542+
62543 /* open file information */
62544 struct files_struct *files;
62545 /* namespaces */
62546@@ -1469,6 +1490,11 @@ struct task_struct {
62547 struct rt_mutex_waiter *pi_blocked_on;
62548 #endif
62549
62550+/* process credentials */
62551+ const struct cred __rcu *real_cred; /* objective and real subjective task
62552+ * credentials (COW) */
62553+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62554+
62555 #ifdef CONFIG_DEBUG_MUTEXES
62556 /* mutex deadlock detection */
62557 struct mutex_waiter *blocked_on;
62558@@ -1585,6 +1611,27 @@ struct task_struct {
62559 unsigned long default_timer_slack_ns;
62560
62561 struct list_head *scm_work_list;
62562+
62563+#ifdef CONFIG_GRKERNSEC
62564+ /* grsecurity */
62565+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62566+ u64 exec_id;
62567+#endif
62568+#ifdef CONFIG_GRKERNSEC_SETXID
62569+ const struct cred *delayed_cred;
62570+#endif
62571+ struct dentry *gr_chroot_dentry;
62572+ struct acl_subject_label *acl;
62573+ struct acl_role_label *role;
62574+ struct file *exec_file;
62575+ u16 acl_role_id;
62576+ /* is this the task that authenticated to the special role */
62577+ u8 acl_sp_role;
62578+ u8 is_writable;
62579+ u8 brute;
62580+ u8 gr_is_chrooted;
62581+#endif
62582+
62583 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
62584 /* Index of current stored address in ret_stack */
62585 int curr_ret_stack;
62586@@ -1619,6 +1666,51 @@ struct task_struct {
62587 #endif
62588 };
62589
62590+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
62591+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
62592+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
62593+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
62594+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
62595+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
62596+
62597+#ifdef CONFIG_PAX_SOFTMODE
62598+extern int pax_softmode;
62599+#endif
62600+
62601+extern int pax_check_flags(unsigned long *);
62602+
62603+/* if tsk != current then task_lock must be held on it */
62604+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62605+static inline unsigned long pax_get_flags(struct task_struct *tsk)
62606+{
62607+ if (likely(tsk->mm))
62608+ return tsk->mm->pax_flags;
62609+ else
62610+ return 0UL;
62611+}
62612+
62613+/* if tsk != current then task_lock must be held on it */
62614+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
62615+{
62616+ if (likely(tsk->mm)) {
62617+ tsk->mm->pax_flags = flags;
62618+ return 0;
62619+ }
62620+ return -EINVAL;
62621+}
62622+#endif
62623+
62624+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62625+extern void pax_set_initial_flags(struct linux_binprm *bprm);
62626+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
62627+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
62628+#endif
62629+
62630+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
62631+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
62632+extern void pax_report_refcount_overflow(struct pt_regs *regs);
62633+extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
62634+
62635 /* Future-safe accessor for struct task_struct's cpus_allowed. */
62636 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
62637
62638@@ -2138,7 +2230,9 @@ void yield(void);
62639 extern struct exec_domain default_exec_domain;
62640
62641 union thread_union {
62642+#ifndef CONFIG_X86
62643 struct thread_info thread_info;
62644+#endif
62645 unsigned long stack[THREAD_SIZE/sizeof(long)];
62646 };
62647
62648@@ -2171,6 +2265,7 @@ extern struct pid_namespace init_pid_ns;
62649 */
62650
62651 extern struct task_struct *find_task_by_vpid(pid_t nr);
62652+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
62653 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
62654 struct pid_namespace *ns);
62655
62656@@ -2314,7 +2409,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
62657 extern void exit_itimers(struct signal_struct *);
62658 extern void flush_itimer_signals(void);
62659
62660-extern void do_group_exit(int);
62661+extern __noreturn void do_group_exit(int);
62662
62663 extern void daemonize(const char *, ...);
62664 extern int allow_signal(int);
62665@@ -2515,13 +2610,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
62666
62667 #endif
62668
62669-static inline int object_is_on_stack(void *obj)
62670+static inline int object_starts_on_stack(void *obj)
62671 {
62672- void *stack = task_stack_page(current);
62673+ const void *stack = task_stack_page(current);
62674
62675 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
62676 }
62677
62678+#ifdef CONFIG_PAX_USERCOPY
62679+extern int object_is_on_stack(const void *obj, unsigned long len);
62680+#endif
62681+
62682 extern void thread_info_cache_init(void);
62683
62684 #ifdef CONFIG_DEBUG_STACK_USAGE
62685diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
62686index 899fbb4..1cb4138 100644
62687--- a/include/linux/screen_info.h
62688+++ b/include/linux/screen_info.h
62689@@ -43,7 +43,8 @@ struct screen_info {
62690 __u16 pages; /* 0x32 */
62691 __u16 vesa_attributes; /* 0x34 */
62692 __u32 capabilities; /* 0x36 */
62693- __u8 _reserved[6]; /* 0x3a */
62694+ __u16 vesapm_size; /* 0x3a */
62695+ __u8 _reserved[4]; /* 0x3c */
62696 } __attribute__((packed));
62697
62698 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
62699diff --git a/include/linux/security.h b/include/linux/security.h
62700index 673afbb..2b7454b 100644
62701--- a/include/linux/security.h
62702+++ b/include/linux/security.h
62703@@ -26,6 +26,7 @@
62704 #include <linux/capability.h>
62705 #include <linux/slab.h>
62706 #include <linux/err.h>
62707+#include <linux/grsecurity.h>
62708
62709 struct linux_binprm;
62710 struct cred;
62711diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
62712index fc61854..d7c490b 100644
62713--- a/include/linux/seq_file.h
62714+++ b/include/linux/seq_file.h
62715@@ -25,6 +25,9 @@ struct seq_file {
62716 struct mutex lock;
62717 const struct seq_operations *op;
62718 int poll_event;
62719+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62720+ u64 exec_id;
62721+#endif
62722 void *private;
62723 };
62724
62725@@ -34,6 +37,7 @@ struct seq_operations {
62726 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
62727 int (*show) (struct seq_file *m, void *v);
62728 };
62729+typedef struct seq_operations __no_const seq_operations_no_const;
62730
62731 #define SEQ_SKIP 1
62732
62733diff --git a/include/linux/shm.h b/include/linux/shm.h
62734index 92808b8..c28cac4 100644
62735--- a/include/linux/shm.h
62736+++ b/include/linux/shm.h
62737@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
62738
62739 /* The task created the shm object. NULL if the task is dead. */
62740 struct task_struct *shm_creator;
62741+#ifdef CONFIG_GRKERNSEC
62742+ time_t shm_createtime;
62743+ pid_t shm_lapid;
62744+#endif
62745 };
62746
62747 /* shm_mode upper byte flags */
62748diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
62749index c168907..c7756db 100644
62750--- a/include/linux/skbuff.h
62751+++ b/include/linux/skbuff.h
62752@@ -666,7 +666,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
62753 */
62754 static inline int skb_queue_empty(const struct sk_buff_head *list)
62755 {
62756- return list->next == (struct sk_buff *)list;
62757+ return list->next == (const struct sk_buff *)list;
62758 }
62759
62760 /**
62761@@ -679,7 +679,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
62762 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
62763 const struct sk_buff *skb)
62764 {
62765- return skb->next == (struct sk_buff *)list;
62766+ return skb->next == (const struct sk_buff *)list;
62767 }
62768
62769 /**
62770@@ -692,7 +692,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
62771 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
62772 const struct sk_buff *skb)
62773 {
62774- return skb->prev == (struct sk_buff *)list;
62775+ return skb->prev == (const struct sk_buff *)list;
62776 }
62777
62778 /**
62779@@ -1587,7 +1587,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
62780 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
62781 */
62782 #ifndef NET_SKB_PAD
62783-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
62784+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
62785 #endif
62786
62787 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
62788diff --git a/include/linux/slab.h b/include/linux/slab.h
62789index a595dce..c403597 100644
62790--- a/include/linux/slab.h
62791+++ b/include/linux/slab.h
62792@@ -11,12 +11,20 @@
62793
62794 #include <linux/gfp.h>
62795 #include <linux/types.h>
62796+#include <linux/err.h>
62797
62798 /*
62799 * Flags to pass to kmem_cache_create().
62800 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
62801 */
62802 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
62803+
62804+#ifdef CONFIG_PAX_USERCOPY
62805+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
62806+#else
62807+#define SLAB_USERCOPY 0x00000000UL
62808+#endif
62809+
62810 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
62811 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
62812 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
62813@@ -87,10 +95,13 @@
62814 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
62815 * Both make kfree a no-op.
62816 */
62817-#define ZERO_SIZE_PTR ((void *)16)
62818+#define ZERO_SIZE_PTR \
62819+({ \
62820+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
62821+ (void *)(-MAX_ERRNO-1L); \
62822+})
62823
62824-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
62825- (unsigned long)ZERO_SIZE_PTR)
62826+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
62827
62828 /*
62829 * struct kmem_cache related prototypes
62830@@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
62831 void kfree(const void *);
62832 void kzfree(const void *);
62833 size_t ksize(const void *);
62834+void check_object_size(const void *ptr, unsigned long n, bool to);
62835
62836 /*
62837 * Allocator specific definitions. These are mainly used to establish optimized
62838@@ -240,6 +252,7 @@ size_t ksize(const void *);
62839 * for general use, and so are not documented here. For a full list of
62840 * potential flags, always refer to linux/gfp.h.
62841 */
62842+static void *kmalloc_array(size_t n, size_t size, gfp_t flags) __size_overflow(1, 2);
62843 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
62844 {
62845 if (size != 0 && n > ULONG_MAX / size)
62846@@ -298,7 +311,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
62847 */
62848 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
62849 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
62850-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
62851+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
62852 #define kmalloc_track_caller(size, flags) \
62853 __kmalloc_track_caller(size, flags, _RET_IP_)
62854 #else
62855@@ -317,7 +330,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
62856 */
62857 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
62858 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
62859-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
62860+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
62861 #define kmalloc_node_track_caller(size, flags, node) \
62862 __kmalloc_node_track_caller(size, flags, node, \
62863 _RET_IP_)
62864diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
62865index fbd1117..d4d8ef8 100644
62866--- a/include/linux/slab_def.h
62867+++ b/include/linux/slab_def.h
62868@@ -66,10 +66,10 @@ struct kmem_cache {
62869 unsigned long node_allocs;
62870 unsigned long node_frees;
62871 unsigned long node_overflow;
62872- atomic_t allochit;
62873- atomic_t allocmiss;
62874- atomic_t freehit;
62875- atomic_t freemiss;
62876+ atomic_unchecked_t allochit;
62877+ atomic_unchecked_t allocmiss;
62878+ atomic_unchecked_t freehit;
62879+ atomic_unchecked_t freemiss;
62880
62881 /*
62882 * If debugging is enabled, then the allocator can add additional
62883@@ -107,7 +107,7 @@ struct cache_sizes {
62884 extern struct cache_sizes malloc_sizes[];
62885
62886 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
62887-void *__kmalloc(size_t size, gfp_t flags);
62888+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
62889
62890 #ifdef CONFIG_TRACING
62891 extern void *kmem_cache_alloc_trace(size_t size,
62892@@ -160,7 +160,7 @@ found:
62893 }
62894
62895 #ifdef CONFIG_NUMA
62896-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
62897+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
62898 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
62899
62900 #ifdef CONFIG_TRACING
62901diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
62902index 0ec00b3..39cb7fc 100644
62903--- a/include/linux/slob_def.h
62904+++ b/include/linux/slob_def.h
62905@@ -9,7 +9,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
62906 return kmem_cache_alloc_node(cachep, flags, -1);
62907 }
62908
62909-void *__kmalloc_node(size_t size, gfp_t flags, int node);
62910+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
62911
62912 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
62913 {
62914@@ -29,6 +29,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
62915 return __kmalloc_node(size, flags, -1);
62916 }
62917
62918+static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
62919 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
62920 {
62921 return kmalloc(size, flags);
62922diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
62923index c2f8c8b..be9e036 100644
62924--- a/include/linux/slub_def.h
62925+++ b/include/linux/slub_def.h
62926@@ -92,7 +92,7 @@ struct kmem_cache {
62927 struct kmem_cache_order_objects max;
62928 struct kmem_cache_order_objects min;
62929 gfp_t allocflags; /* gfp flags to use on each alloc */
62930- int refcount; /* Refcount for slab cache destroy */
62931+ atomic_t refcount; /* Refcount for slab cache destroy */
62932 void (*ctor)(void *);
62933 int inuse; /* Offset to metadata */
62934 int align; /* Alignment */
62935@@ -153,6 +153,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
62936 * Sorry that the following has to be that ugly but some versions of GCC
62937 * have trouble with constant propagation and loops.
62938 */
62939+static __always_inline int kmalloc_index(size_t size) __size_overflow(1);
62940 static __always_inline int kmalloc_index(size_t size)
62941 {
62942 if (!size)
62943@@ -218,7 +219,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
62944 }
62945
62946 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
62947-void *__kmalloc(size_t size, gfp_t flags);
62948+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
62949
62950 static __always_inline void *
62951 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
62952@@ -259,6 +260,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
62953 }
62954 #endif
62955
62956+static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
62957 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
62958 {
62959 unsigned int order = get_order(size);
62960@@ -284,7 +286,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
62961 }
62962
62963 #ifdef CONFIG_NUMA
62964-void *__kmalloc_node(size_t size, gfp_t flags, int node);
62965+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
62966 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
62967
62968 #ifdef CONFIG_TRACING
62969diff --git a/include/linux/sonet.h b/include/linux/sonet.h
62970index de8832d..0147b46 100644
62971--- a/include/linux/sonet.h
62972+++ b/include/linux/sonet.h
62973@@ -61,7 +61,7 @@ struct sonet_stats {
62974 #include <linux/atomic.h>
62975
62976 struct k_sonet_stats {
62977-#define __HANDLE_ITEM(i) atomic_t i
62978+#define __HANDLE_ITEM(i) atomic_unchecked_t i
62979 __SONET_ITEMS
62980 #undef __HANDLE_ITEM
62981 };
62982diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
62983index 523547e..2cb7140 100644
62984--- a/include/linux/sunrpc/clnt.h
62985+++ b/include/linux/sunrpc/clnt.h
62986@@ -174,9 +174,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
62987 {
62988 switch (sap->sa_family) {
62989 case AF_INET:
62990- return ntohs(((struct sockaddr_in *)sap)->sin_port);
62991+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
62992 case AF_INET6:
62993- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
62994+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
62995 }
62996 return 0;
62997 }
62998@@ -209,7 +209,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
62999 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
63000 const struct sockaddr *src)
63001 {
63002- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
63003+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
63004 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
63005
63006 dsin->sin_family = ssin->sin_family;
63007@@ -312,7 +312,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
63008 if (sa->sa_family != AF_INET6)
63009 return 0;
63010
63011- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
63012+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
63013 }
63014
63015 #endif /* __KERNEL__ */
63016diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
63017index dc0c3cc..8503fb6 100644
63018--- a/include/linux/sunrpc/sched.h
63019+++ b/include/linux/sunrpc/sched.h
63020@@ -106,6 +106,7 @@ struct rpc_call_ops {
63021 void (*rpc_count_stats)(struct rpc_task *, void *);
63022 void (*rpc_release)(void *);
63023 };
63024+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
63025
63026 struct rpc_task_setup {
63027 struct rpc_task *task;
63028diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
63029index 0b8e3e6..33e0a01 100644
63030--- a/include/linux/sunrpc/svc_rdma.h
63031+++ b/include/linux/sunrpc/svc_rdma.h
63032@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
63033 extern unsigned int svcrdma_max_requests;
63034 extern unsigned int svcrdma_max_req_size;
63035
63036-extern atomic_t rdma_stat_recv;
63037-extern atomic_t rdma_stat_read;
63038-extern atomic_t rdma_stat_write;
63039-extern atomic_t rdma_stat_sq_starve;
63040-extern atomic_t rdma_stat_rq_starve;
63041-extern atomic_t rdma_stat_rq_poll;
63042-extern atomic_t rdma_stat_rq_prod;
63043-extern atomic_t rdma_stat_sq_poll;
63044-extern atomic_t rdma_stat_sq_prod;
63045+extern atomic_unchecked_t rdma_stat_recv;
63046+extern atomic_unchecked_t rdma_stat_read;
63047+extern atomic_unchecked_t rdma_stat_write;
63048+extern atomic_unchecked_t rdma_stat_sq_starve;
63049+extern atomic_unchecked_t rdma_stat_rq_starve;
63050+extern atomic_unchecked_t rdma_stat_rq_poll;
63051+extern atomic_unchecked_t rdma_stat_rq_prod;
63052+extern atomic_unchecked_t rdma_stat_sq_poll;
63053+extern atomic_unchecked_t rdma_stat_sq_prod;
63054
63055 #define RPCRDMA_VERSION 1
63056
63057diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
63058index c34b4c8..a65b67d 100644
63059--- a/include/linux/sysctl.h
63060+++ b/include/linux/sysctl.h
63061@@ -155,7 +155,11 @@ enum
63062 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
63063 };
63064
63065-
63066+#ifdef CONFIG_PAX_SOFTMODE
63067+enum {
63068+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
63069+};
63070+#endif
63071
63072 /* CTL_VM names: */
63073 enum
63074@@ -948,6 +952,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
63075
63076 extern int proc_dostring(struct ctl_table *, int,
63077 void __user *, size_t *, loff_t *);
63078+extern int proc_dostring_modpriv(struct ctl_table *, int,
63079+ void __user *, size_t *, loff_t *);
63080 extern int proc_dointvec(struct ctl_table *, int,
63081 void __user *, size_t *, loff_t *);
63082 extern int proc_dointvec_minmax(struct ctl_table *, int,
63083diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
63084index ff7dc08..893e1bd 100644
63085--- a/include/linux/tty_ldisc.h
63086+++ b/include/linux/tty_ldisc.h
63087@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
63088
63089 struct module *owner;
63090
63091- int refcount;
63092+ atomic_t refcount;
63093 };
63094
63095 struct tty_ldisc {
63096diff --git a/include/linux/types.h b/include/linux/types.h
63097index 7f480db..175c256 100644
63098--- a/include/linux/types.h
63099+++ b/include/linux/types.h
63100@@ -220,10 +220,26 @@ typedef struct {
63101 int counter;
63102 } atomic_t;
63103
63104+#ifdef CONFIG_PAX_REFCOUNT
63105+typedef struct {
63106+ int counter;
63107+} atomic_unchecked_t;
63108+#else
63109+typedef atomic_t atomic_unchecked_t;
63110+#endif
63111+
63112 #ifdef CONFIG_64BIT
63113 typedef struct {
63114 long counter;
63115 } atomic64_t;
63116+
63117+#ifdef CONFIG_PAX_REFCOUNT
63118+typedef struct {
63119+ long counter;
63120+} atomic64_unchecked_t;
63121+#else
63122+typedef atomic64_t atomic64_unchecked_t;
63123+#endif
63124 #endif
63125
63126 struct list_head {
63127diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
63128index 5ca0951..ab496a5 100644
63129--- a/include/linux/uaccess.h
63130+++ b/include/linux/uaccess.h
63131@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
63132 long ret; \
63133 mm_segment_t old_fs = get_fs(); \
63134 \
63135- set_fs(KERNEL_DS); \
63136 pagefault_disable(); \
63137- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
63138- pagefault_enable(); \
63139+ set_fs(KERNEL_DS); \
63140+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
63141 set_fs(old_fs); \
63142+ pagefault_enable(); \
63143 ret; \
63144 })
63145
63146diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
63147index 99c1b4d..bb94261 100644
63148--- a/include/linux/unaligned/access_ok.h
63149+++ b/include/linux/unaligned/access_ok.h
63150@@ -6,32 +6,32 @@
63151
63152 static inline u16 get_unaligned_le16(const void *p)
63153 {
63154- return le16_to_cpup((__le16 *)p);
63155+ return le16_to_cpup((const __le16 *)p);
63156 }
63157
63158 static inline u32 get_unaligned_le32(const void *p)
63159 {
63160- return le32_to_cpup((__le32 *)p);
63161+ return le32_to_cpup((const __le32 *)p);
63162 }
63163
63164 static inline u64 get_unaligned_le64(const void *p)
63165 {
63166- return le64_to_cpup((__le64 *)p);
63167+ return le64_to_cpup((const __le64 *)p);
63168 }
63169
63170 static inline u16 get_unaligned_be16(const void *p)
63171 {
63172- return be16_to_cpup((__be16 *)p);
63173+ return be16_to_cpup((const __be16 *)p);
63174 }
63175
63176 static inline u32 get_unaligned_be32(const void *p)
63177 {
63178- return be32_to_cpup((__be32 *)p);
63179+ return be32_to_cpup((const __be32 *)p);
63180 }
63181
63182 static inline u64 get_unaligned_be64(const void *p)
63183 {
63184- return be64_to_cpup((__be64 *)p);
63185+ return be64_to_cpup((const __be64 *)p);
63186 }
63187
63188 static inline void put_unaligned_le16(u16 val, void *p)
63189diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
63190index 547e59c..db6ad19 100644
63191--- a/include/linux/usb/renesas_usbhs.h
63192+++ b/include/linux/usb/renesas_usbhs.h
63193@@ -39,7 +39,7 @@ enum {
63194 */
63195 struct renesas_usbhs_driver_callback {
63196 int (*notify_hotplug)(struct platform_device *pdev);
63197-};
63198+} __no_const;
63199
63200 /*
63201 * callback functions for platform
63202@@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
63203 * VBUS control is needed for Host
63204 */
63205 int (*set_vbus)(struct platform_device *pdev, int enable);
63206-};
63207+} __no_const;
63208
63209 /*
63210 * parameters for renesas usbhs
63211diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
63212index 6f8fbcf..8259001 100644
63213--- a/include/linux/vermagic.h
63214+++ b/include/linux/vermagic.h
63215@@ -25,9 +25,35 @@
63216 #define MODULE_ARCH_VERMAGIC ""
63217 #endif
63218
63219+#ifdef CONFIG_PAX_REFCOUNT
63220+#define MODULE_PAX_REFCOUNT "REFCOUNT "
63221+#else
63222+#define MODULE_PAX_REFCOUNT ""
63223+#endif
63224+
63225+#ifdef CONSTIFY_PLUGIN
63226+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
63227+#else
63228+#define MODULE_CONSTIFY_PLUGIN ""
63229+#endif
63230+
63231+#ifdef STACKLEAK_PLUGIN
63232+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
63233+#else
63234+#define MODULE_STACKLEAK_PLUGIN ""
63235+#endif
63236+
63237+#ifdef CONFIG_GRKERNSEC
63238+#define MODULE_GRSEC "GRSEC "
63239+#else
63240+#define MODULE_GRSEC ""
63241+#endif
63242+
63243 #define VERMAGIC_STRING \
63244 UTS_RELEASE " " \
63245 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
63246 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
63247- MODULE_ARCH_VERMAGIC
63248+ MODULE_ARCH_VERMAGIC \
63249+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
63250+ MODULE_GRSEC
63251
63252diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
63253index dcdfc2b..ec79ab5 100644
63254--- a/include/linux/vmalloc.h
63255+++ b/include/linux/vmalloc.h
63256@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
63257 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
63258 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
63259 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
63260+
63261+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
63262+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
63263+#endif
63264+
63265 /* bits [20..32] reserved for arch specific ioremap internals */
63266
63267 /*
63268@@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
63269 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
63270 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
63271 unsigned long start, unsigned long end, gfp_t gfp_mask,
63272- pgprot_t prot, int node, void *caller);
63273+ pgprot_t prot, int node, void *caller) __size_overflow(1);
63274 extern void vfree(const void *addr);
63275
63276 extern void *vmap(struct page **pages, unsigned int count,
63277@@ -123,8 +128,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
63278 extern void free_vm_area(struct vm_struct *area);
63279
63280 /* for /dev/kmem */
63281-extern long vread(char *buf, char *addr, unsigned long count);
63282-extern long vwrite(char *buf, char *addr, unsigned long count);
63283+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
63284+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
63285
63286 /*
63287 * Internals. Dont't use..
63288diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
63289index 65efb92..137adbb 100644
63290--- a/include/linux/vmstat.h
63291+++ b/include/linux/vmstat.h
63292@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
63293 /*
63294 * Zone based page accounting with per cpu differentials.
63295 */
63296-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63297+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63298
63299 static inline void zone_page_state_add(long x, struct zone *zone,
63300 enum zone_stat_item item)
63301 {
63302- atomic_long_add(x, &zone->vm_stat[item]);
63303- atomic_long_add(x, &vm_stat[item]);
63304+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
63305+ atomic_long_add_unchecked(x, &vm_stat[item]);
63306 }
63307
63308 static inline unsigned long global_page_state(enum zone_stat_item item)
63309 {
63310- long x = atomic_long_read(&vm_stat[item]);
63311+ long x = atomic_long_read_unchecked(&vm_stat[item]);
63312 #ifdef CONFIG_SMP
63313 if (x < 0)
63314 x = 0;
63315@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
63316 static inline unsigned long zone_page_state(struct zone *zone,
63317 enum zone_stat_item item)
63318 {
63319- long x = atomic_long_read(&zone->vm_stat[item]);
63320+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63321 #ifdef CONFIG_SMP
63322 if (x < 0)
63323 x = 0;
63324@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
63325 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
63326 enum zone_stat_item item)
63327 {
63328- long x = atomic_long_read(&zone->vm_stat[item]);
63329+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63330
63331 #ifdef CONFIG_SMP
63332 int cpu;
63333@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
63334
63335 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
63336 {
63337- atomic_long_inc(&zone->vm_stat[item]);
63338- atomic_long_inc(&vm_stat[item]);
63339+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
63340+ atomic_long_inc_unchecked(&vm_stat[item]);
63341 }
63342
63343 static inline void __inc_zone_page_state(struct page *page,
63344@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
63345
63346 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
63347 {
63348- atomic_long_dec(&zone->vm_stat[item]);
63349- atomic_long_dec(&vm_stat[item]);
63350+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
63351+ atomic_long_dec_unchecked(&vm_stat[item]);
63352 }
63353
63354 static inline void __dec_zone_page_state(struct page *page,
63355diff --git a/include/linux/xattr.h b/include/linux/xattr.h
63356index e5d1220..ef6e406 100644
63357--- a/include/linux/xattr.h
63358+++ b/include/linux/xattr.h
63359@@ -57,6 +57,11 @@
63360 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
63361 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
63362
63363+/* User namespace */
63364+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
63365+#define XATTR_PAX_FLAGS_SUFFIX "flags"
63366+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
63367+
63368 #ifdef __KERNEL__
63369
63370 #include <linux/types.h>
63371diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
63372index 4aeff96..b378cdc 100644
63373--- a/include/media/saa7146_vv.h
63374+++ b/include/media/saa7146_vv.h
63375@@ -163,7 +163,7 @@ struct saa7146_ext_vv
63376 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
63377
63378 /* the extension can override this */
63379- struct v4l2_ioctl_ops ops;
63380+ v4l2_ioctl_ops_no_const ops;
63381 /* pointer to the saa7146 core ops */
63382 const struct v4l2_ioctl_ops *core_ops;
63383
63384diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
63385index 96d2221..2292f89 100644
63386--- a/include/media/v4l2-dev.h
63387+++ b/include/media/v4l2-dev.h
63388@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
63389
63390
63391 struct v4l2_file_operations {
63392- struct module *owner;
63393+ struct module * const owner;
63394 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
63395 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
63396 unsigned int (*poll) (struct file *, struct poll_table_struct *);
63397@@ -71,6 +71,7 @@ struct v4l2_file_operations {
63398 int (*open) (struct file *);
63399 int (*release) (struct file *);
63400 };
63401+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
63402
63403 /*
63404 * Newer version of video_device, handled by videodev2.c
63405diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
63406index 3cb939c..f23c6bb 100644
63407--- a/include/media/v4l2-ioctl.h
63408+++ b/include/media/v4l2-ioctl.h
63409@@ -281,7 +281,7 @@ struct v4l2_ioctl_ops {
63410 long (*vidioc_default) (struct file *file, void *fh,
63411 bool valid_prio, int cmd, void *arg);
63412 };
63413-
63414+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
63415
63416 /* v4l debugging and diagnostics */
63417
63418diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
63419index 6db8ecf..8c23861 100644
63420--- a/include/net/caif/caif_hsi.h
63421+++ b/include/net/caif/caif_hsi.h
63422@@ -98,7 +98,7 @@ struct cfhsi_drv {
63423 void (*rx_done_cb) (struct cfhsi_drv *drv);
63424 void (*wake_up_cb) (struct cfhsi_drv *drv);
63425 void (*wake_down_cb) (struct cfhsi_drv *drv);
63426-};
63427+} __no_const;
63428
63429 /* Structure implemented by HSI device. */
63430 struct cfhsi_dev {
63431diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
63432index 9e5425b..8136ffc 100644
63433--- a/include/net/caif/cfctrl.h
63434+++ b/include/net/caif/cfctrl.h
63435@@ -52,7 +52,7 @@ struct cfctrl_rsp {
63436 void (*radioset_rsp)(void);
63437 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
63438 struct cflayer *client_layer);
63439-};
63440+} __no_const;
63441
63442 /* Link Setup Parameters for CAIF-Links. */
63443 struct cfctrl_link_param {
63444@@ -101,8 +101,8 @@ struct cfctrl_request_info {
63445 struct cfctrl {
63446 struct cfsrvl serv;
63447 struct cfctrl_rsp res;
63448- atomic_t req_seq_no;
63449- atomic_t rsp_seq_no;
63450+ atomic_unchecked_t req_seq_no;
63451+ atomic_unchecked_t rsp_seq_no;
63452 struct list_head list;
63453 /* Protects from simultaneous access to first_req list */
63454 spinlock_t info_list_lock;
63455diff --git a/include/net/flow.h b/include/net/flow.h
63456index 6c469db..7743b8e 100644
63457--- a/include/net/flow.h
63458+++ b/include/net/flow.h
63459@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
63460
63461 extern void flow_cache_flush(void);
63462 extern void flow_cache_flush_deferred(void);
63463-extern atomic_t flow_cache_genid;
63464+extern atomic_unchecked_t flow_cache_genid;
63465
63466 #endif
63467diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
63468index b94765e..053f68b 100644
63469--- a/include/net/inetpeer.h
63470+++ b/include/net/inetpeer.h
63471@@ -48,8 +48,8 @@ struct inet_peer {
63472 */
63473 union {
63474 struct {
63475- atomic_t rid; /* Frag reception counter */
63476- atomic_t ip_id_count; /* IP ID for the next packet */
63477+ atomic_unchecked_t rid; /* Frag reception counter */
63478+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
63479 __u32 tcp_ts;
63480 __u32 tcp_ts_stamp;
63481 };
63482@@ -115,11 +115,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
63483 more++;
63484 inet_peer_refcheck(p);
63485 do {
63486- old = atomic_read(&p->ip_id_count);
63487+ old = atomic_read_unchecked(&p->ip_id_count);
63488 new = old + more;
63489 if (!new)
63490 new = 1;
63491- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
63492+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
63493 return new;
63494 }
63495
63496diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
63497index 10422ef..662570f 100644
63498--- a/include/net/ip_fib.h
63499+++ b/include/net/ip_fib.h
63500@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
63501
63502 #define FIB_RES_SADDR(net, res) \
63503 ((FIB_RES_NH(res).nh_saddr_genid == \
63504- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
63505+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
63506 FIB_RES_NH(res).nh_saddr : \
63507 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
63508 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
63509diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
63510index 72522f0..6f03a2b 100644
63511--- a/include/net/ip_vs.h
63512+++ b/include/net/ip_vs.h
63513@@ -510,7 +510,7 @@ struct ip_vs_conn {
63514 struct ip_vs_conn *control; /* Master control connection */
63515 atomic_t n_control; /* Number of controlled ones */
63516 struct ip_vs_dest *dest; /* real server */
63517- atomic_t in_pkts; /* incoming packet counter */
63518+ atomic_unchecked_t in_pkts; /* incoming packet counter */
63519
63520 /* packet transmitter for different forwarding methods. If it
63521 mangles the packet, it must return NF_DROP or better NF_STOLEN,
63522@@ -648,7 +648,7 @@ struct ip_vs_dest {
63523 __be16 port; /* port number of the server */
63524 union nf_inet_addr addr; /* IP address of the server */
63525 volatile unsigned flags; /* dest status flags */
63526- atomic_t conn_flags; /* flags to copy to conn */
63527+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
63528 atomic_t weight; /* server weight */
63529
63530 atomic_t refcnt; /* reference counter */
63531diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
63532index 69b610a..fe3962c 100644
63533--- a/include/net/irda/ircomm_core.h
63534+++ b/include/net/irda/ircomm_core.h
63535@@ -51,7 +51,7 @@ typedef struct {
63536 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
63537 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
63538 struct ircomm_info *);
63539-} call_t;
63540+} __no_const call_t;
63541
63542 struct ircomm_cb {
63543 irda_queue_t queue;
63544diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
63545index 59ba38bc..d515662 100644
63546--- a/include/net/irda/ircomm_tty.h
63547+++ b/include/net/irda/ircomm_tty.h
63548@@ -35,6 +35,7 @@
63549 #include <linux/termios.h>
63550 #include <linux/timer.h>
63551 #include <linux/tty.h> /* struct tty_struct */
63552+#include <asm/local.h>
63553
63554 #include <net/irda/irias_object.h>
63555 #include <net/irda/ircomm_core.h>
63556@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
63557 unsigned short close_delay;
63558 unsigned short closing_wait; /* time to wait before closing */
63559
63560- int open_count;
63561- int blocked_open; /* # of blocked opens */
63562+ local_t open_count;
63563+ local_t blocked_open; /* # of blocked opens */
63564
63565 /* Protect concurent access to :
63566 * o self->open_count
63567diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
63568index cc7c197..9f2da2a 100644
63569--- a/include/net/iucv/af_iucv.h
63570+++ b/include/net/iucv/af_iucv.h
63571@@ -141,7 +141,7 @@ struct iucv_sock {
63572 struct iucv_sock_list {
63573 struct hlist_head head;
63574 rwlock_t lock;
63575- atomic_t autobind_name;
63576+ atomic_unchecked_t autobind_name;
63577 };
63578
63579 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
63580diff --git a/include/net/neighbour.h b/include/net/neighbour.h
63581index 34c996f..bb3b4d4 100644
63582--- a/include/net/neighbour.h
63583+++ b/include/net/neighbour.h
63584@@ -123,7 +123,7 @@ struct neigh_ops {
63585 void (*error_report)(struct neighbour *, struct sk_buff *);
63586 int (*output)(struct neighbour *, struct sk_buff *);
63587 int (*connected_output)(struct neighbour *, struct sk_buff *);
63588-};
63589+} __do_const;
63590
63591 struct pneigh_entry {
63592 struct pneigh_entry *next;
63593diff --git a/include/net/netlink.h b/include/net/netlink.h
63594index f394fe5..fd073f9 100644
63595--- a/include/net/netlink.h
63596+++ b/include/net/netlink.h
63597@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
63598 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
63599 {
63600 if (mark)
63601- skb_trim(skb, (unsigned char *) mark - skb->data);
63602+ skb_trim(skb, (const unsigned char *) mark - skb->data);
63603 }
63604
63605 /**
63606diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
63607index bbd023a..97c6d0d 100644
63608--- a/include/net/netns/ipv4.h
63609+++ b/include/net/netns/ipv4.h
63610@@ -57,8 +57,8 @@ struct netns_ipv4 {
63611 unsigned int sysctl_ping_group_range[2];
63612 long sysctl_tcp_mem[3];
63613
63614- atomic_t rt_genid;
63615- atomic_t dev_addr_genid;
63616+ atomic_unchecked_t rt_genid;
63617+ atomic_unchecked_t dev_addr_genid;
63618
63619 #ifdef CONFIG_IP_MROUTE
63620 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
63621diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
63622index a2ef814..31a8e3f 100644
63623--- a/include/net/sctp/sctp.h
63624+++ b/include/net/sctp/sctp.h
63625@@ -318,9 +318,9 @@ do { \
63626
63627 #else /* SCTP_DEBUG */
63628
63629-#define SCTP_DEBUG_PRINTK(whatever...)
63630-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
63631-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
63632+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
63633+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
63634+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
63635 #define SCTP_ENABLE_DEBUG
63636 #define SCTP_DISABLE_DEBUG
63637 #define SCTP_ASSERT(expr, str, func)
63638diff --git a/include/net/sock.h b/include/net/sock.h
63639index 5a0a58a..2e3d4d0 100644
63640--- a/include/net/sock.h
63641+++ b/include/net/sock.h
63642@@ -302,7 +302,7 @@ struct sock {
63643 #ifdef CONFIG_RPS
63644 __u32 sk_rxhash;
63645 #endif
63646- atomic_t sk_drops;
63647+ atomic_unchecked_t sk_drops;
63648 int sk_rcvbuf;
63649
63650 struct sk_filter __rcu *sk_filter;
63651@@ -1691,7 +1691,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
63652 }
63653
63654 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
63655- char __user *from, char *to,
63656+ char __user *from, unsigned char *to,
63657 int copy, int offset)
63658 {
63659 if (skb->ip_summed == CHECKSUM_NONE) {
63660diff --git a/include/net/tcp.h b/include/net/tcp.h
63661index f75a04d..702cf06 100644
63662--- a/include/net/tcp.h
63663+++ b/include/net/tcp.h
63664@@ -1425,7 +1425,7 @@ struct tcp_seq_afinfo {
63665 char *name;
63666 sa_family_t family;
63667 const struct file_operations *seq_fops;
63668- struct seq_operations seq_ops;
63669+ seq_operations_no_const seq_ops;
63670 };
63671
63672 struct tcp_iter_state {
63673diff --git a/include/net/udp.h b/include/net/udp.h
63674index 5d606d9..e879f7b 100644
63675--- a/include/net/udp.h
63676+++ b/include/net/udp.h
63677@@ -244,7 +244,7 @@ struct udp_seq_afinfo {
63678 sa_family_t family;
63679 struct udp_table *udp_table;
63680 const struct file_operations *seq_fops;
63681- struct seq_operations seq_ops;
63682+ seq_operations_no_const seq_ops;
63683 };
63684
63685 struct udp_iter_state {
63686diff --git a/include/net/xfrm.h b/include/net/xfrm.h
63687index 96239e7..c85b032 100644
63688--- a/include/net/xfrm.h
63689+++ b/include/net/xfrm.h
63690@@ -505,7 +505,7 @@ struct xfrm_policy {
63691 struct timer_list timer;
63692
63693 struct flow_cache_object flo;
63694- atomic_t genid;
63695+ atomic_unchecked_t genid;
63696 u32 priority;
63697 u32 index;
63698 struct xfrm_mark mark;
63699diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
63700index 1a046b1..ee0bef0 100644
63701--- a/include/rdma/iw_cm.h
63702+++ b/include/rdma/iw_cm.h
63703@@ -122,7 +122,7 @@ struct iw_cm_verbs {
63704 int backlog);
63705
63706 int (*destroy_listen)(struct iw_cm_id *cm_id);
63707-};
63708+} __no_const;
63709
63710 /**
63711 * iw_create_cm_id - Create an IW CM identifier.
63712diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
63713index 8f9dfba..610ab6c 100644
63714--- a/include/scsi/libfc.h
63715+++ b/include/scsi/libfc.h
63716@@ -756,6 +756,7 @@ struct libfc_function_template {
63717 */
63718 void (*disc_stop_final) (struct fc_lport *);
63719 };
63720+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
63721
63722 /**
63723 * struct fc_disc - Discovery context
63724@@ -861,7 +862,7 @@ struct fc_lport {
63725 struct fc_vport *vport;
63726
63727 /* Operational Information */
63728- struct libfc_function_template tt;
63729+ libfc_function_template_no_const tt;
63730 u8 link_up;
63731 u8 qfull;
63732 enum fc_lport_state state;
63733diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
63734index 6efb2e1..cdad57f 100644
63735--- a/include/scsi/scsi_device.h
63736+++ b/include/scsi/scsi_device.h
63737@@ -162,9 +162,9 @@ struct scsi_device {
63738 unsigned int max_device_blocked; /* what device_blocked counts down from */
63739 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
63740
63741- atomic_t iorequest_cnt;
63742- atomic_t iodone_cnt;
63743- atomic_t ioerr_cnt;
63744+ atomic_unchecked_t iorequest_cnt;
63745+ atomic_unchecked_t iodone_cnt;
63746+ atomic_unchecked_t ioerr_cnt;
63747
63748 struct device sdev_gendev,
63749 sdev_dev;
63750diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
63751index 719faf1..d1154d4 100644
63752--- a/include/scsi/scsi_transport_fc.h
63753+++ b/include/scsi/scsi_transport_fc.h
63754@@ -739,7 +739,7 @@ struct fc_function_template {
63755 unsigned long show_host_system_hostname:1;
63756
63757 unsigned long disable_target_scan:1;
63758-};
63759+} __do_const;
63760
63761
63762 /**
63763diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
63764index 030b87c..98a6954 100644
63765--- a/include/sound/ak4xxx-adda.h
63766+++ b/include/sound/ak4xxx-adda.h
63767@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
63768 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
63769 unsigned char val);
63770 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
63771-};
63772+} __no_const;
63773
63774 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
63775
63776diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
63777index 8c05e47..2b5df97 100644
63778--- a/include/sound/hwdep.h
63779+++ b/include/sound/hwdep.h
63780@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
63781 struct snd_hwdep_dsp_status *status);
63782 int (*dsp_load)(struct snd_hwdep *hw,
63783 struct snd_hwdep_dsp_image *image);
63784-};
63785+} __no_const;
63786
63787 struct snd_hwdep {
63788 struct snd_card *card;
63789diff --git a/include/sound/info.h b/include/sound/info.h
63790index 9ca1a49..aba1728 100644
63791--- a/include/sound/info.h
63792+++ b/include/sound/info.h
63793@@ -44,7 +44,7 @@ struct snd_info_entry_text {
63794 struct snd_info_buffer *buffer);
63795 void (*write)(struct snd_info_entry *entry,
63796 struct snd_info_buffer *buffer);
63797-};
63798+} __no_const;
63799
63800 struct snd_info_entry_ops {
63801 int (*open)(struct snd_info_entry *entry,
63802diff --git a/include/sound/pcm.h b/include/sound/pcm.h
63803index 0d11128..814178e 100644
63804--- a/include/sound/pcm.h
63805+++ b/include/sound/pcm.h
63806@@ -81,6 +81,7 @@ struct snd_pcm_ops {
63807 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
63808 int (*ack)(struct snd_pcm_substream *substream);
63809 };
63810+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
63811
63812 /*
63813 *
63814diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
63815index af1b49e..a5d55a5 100644
63816--- a/include/sound/sb16_csp.h
63817+++ b/include/sound/sb16_csp.h
63818@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
63819 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
63820 int (*csp_stop) (struct snd_sb_csp * p);
63821 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
63822-};
63823+} __no_const;
63824
63825 /*
63826 * CSP private data
63827diff --git a/include/sound/soc.h b/include/sound/soc.h
63828index 2ebf787..0276839 100644
63829--- a/include/sound/soc.h
63830+++ b/include/sound/soc.h
63831@@ -711,7 +711,7 @@ struct snd_soc_platform_driver {
63832 /* platform IO - used for platform DAPM */
63833 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
63834 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
63835-};
63836+} __do_const;
63837
63838 struct snd_soc_platform {
63839 const char *name;
63840@@ -887,7 +887,7 @@ struct snd_soc_pcm_runtime {
63841 struct snd_soc_dai_link *dai_link;
63842 struct mutex pcm_mutex;
63843 enum snd_soc_pcm_subclass pcm_subclass;
63844- struct snd_pcm_ops ops;
63845+ snd_pcm_ops_no_const ops;
63846
63847 unsigned int complete:1;
63848 unsigned int dev_registered:1;
63849diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
63850index 4119966..1a4671c 100644
63851--- a/include/sound/ymfpci.h
63852+++ b/include/sound/ymfpci.h
63853@@ -358,7 +358,7 @@ struct snd_ymfpci {
63854 spinlock_t reg_lock;
63855 spinlock_t voice_lock;
63856 wait_queue_head_t interrupt_sleep;
63857- atomic_t interrupt_sleep_count;
63858+ atomic_unchecked_t interrupt_sleep_count;
63859 struct snd_info_entry *proc_entry;
63860 const struct firmware *dsp_microcode;
63861 const struct firmware *controller_microcode;
63862diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
63863index aaccc5f..092d568 100644
63864--- a/include/target/target_core_base.h
63865+++ b/include/target/target_core_base.h
63866@@ -447,7 +447,7 @@ struct t10_reservation_ops {
63867 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
63868 int (*t10_pr_register)(struct se_cmd *);
63869 int (*t10_pr_clear)(struct se_cmd *);
63870-};
63871+} __no_const;
63872
63873 struct t10_reservation {
63874 /* Reservation effects all target ports */
63875@@ -576,7 +576,7 @@ struct se_cmd {
63876 atomic_t t_se_count;
63877 atomic_t t_task_cdbs_left;
63878 atomic_t t_task_cdbs_ex_left;
63879- atomic_t t_task_cdbs_sent;
63880+ atomic_unchecked_t t_task_cdbs_sent;
63881 unsigned int transport_state;
63882 #define CMD_T_ABORTED (1 << 0)
63883 #define CMD_T_ACTIVE (1 << 1)
63884@@ -802,7 +802,7 @@ struct se_device {
63885 spinlock_t stats_lock;
63886 /* Active commands on this virtual SE device */
63887 atomic_t simple_cmds;
63888- atomic_t dev_ordered_id;
63889+ atomic_unchecked_t dev_ordered_id;
63890 atomic_t execute_tasks;
63891 atomic_t dev_ordered_sync;
63892 atomic_t dev_qf_count;
63893diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
63894new file mode 100644
63895index 0000000..2efe49d
63896--- /dev/null
63897+++ b/include/trace/events/fs.h
63898@@ -0,0 +1,53 @@
63899+#undef TRACE_SYSTEM
63900+#define TRACE_SYSTEM fs
63901+
63902+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
63903+#define _TRACE_FS_H
63904+
63905+#include <linux/fs.h>
63906+#include <linux/tracepoint.h>
63907+
63908+TRACE_EVENT(do_sys_open,
63909+
63910+ TP_PROTO(char *filename, int flags, int mode),
63911+
63912+ TP_ARGS(filename, flags, mode),
63913+
63914+ TP_STRUCT__entry(
63915+ __string( filename, filename )
63916+ __field( int, flags )
63917+ __field( int, mode )
63918+ ),
63919+
63920+ TP_fast_assign(
63921+ __assign_str(filename, filename);
63922+ __entry->flags = flags;
63923+ __entry->mode = mode;
63924+ ),
63925+
63926+ TP_printk("\"%s\" %x %o",
63927+ __get_str(filename), __entry->flags, __entry->mode)
63928+);
63929+
63930+TRACE_EVENT(open_exec,
63931+
63932+ TP_PROTO(const char *filename),
63933+
63934+ TP_ARGS(filename),
63935+
63936+ TP_STRUCT__entry(
63937+ __string( filename, filename )
63938+ ),
63939+
63940+ TP_fast_assign(
63941+ __assign_str(filename, filename);
63942+ ),
63943+
63944+ TP_printk("\"%s\"",
63945+ __get_str(filename))
63946+);
63947+
63948+#endif /* _TRACE_FS_H */
63949+
63950+/* This part must be outside protection */
63951+#include <trace/define_trace.h>
63952diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
63953index 1c09820..7f5ec79 100644
63954--- a/include/trace/events/irq.h
63955+++ b/include/trace/events/irq.h
63956@@ -36,7 +36,7 @@ struct softirq_action;
63957 */
63958 TRACE_EVENT(irq_handler_entry,
63959
63960- TP_PROTO(int irq, struct irqaction *action),
63961+ TP_PROTO(int irq, const struct irqaction *action),
63962
63963 TP_ARGS(irq, action),
63964
63965@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
63966 */
63967 TRACE_EVENT(irq_handler_exit,
63968
63969- TP_PROTO(int irq, struct irqaction *action, int ret),
63970+ TP_PROTO(int irq, const struct irqaction *action, int ret),
63971
63972 TP_ARGS(irq, action, ret),
63973
63974diff --git a/include/video/udlfb.h b/include/video/udlfb.h
63975index f9466fa..f4e2b81 100644
63976--- a/include/video/udlfb.h
63977+++ b/include/video/udlfb.h
63978@@ -53,10 +53,10 @@ struct dlfb_data {
63979 u32 pseudo_palette[256];
63980 int blank_mode; /*one of FB_BLANK_ */
63981 /* blit-only rendering path metrics, exposed through sysfs */
63982- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
63983- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
63984- atomic_t bytes_sent; /* to usb, after compression including overhead */
63985- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
63986+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
63987+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
63988+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
63989+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
63990 };
63991
63992 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
63993diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
63994index 0993a22..32ba2fe 100644
63995--- a/include/video/uvesafb.h
63996+++ b/include/video/uvesafb.h
63997@@ -177,6 +177,7 @@ struct uvesafb_par {
63998 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
63999 u8 pmi_setpal; /* PMI for palette changes */
64000 u16 *pmi_base; /* protected mode interface location */
64001+ u8 *pmi_code; /* protected mode code location */
64002 void *pmi_start;
64003 void *pmi_pal;
64004 u8 *vbe_state_orig; /*
64005diff --git a/init/Kconfig b/init/Kconfig
64006index 6cfd71d..73cb68d 100644
64007--- a/init/Kconfig
64008+++ b/init/Kconfig
64009@@ -790,6 +790,7 @@ endif # CGROUPS
64010
64011 config CHECKPOINT_RESTORE
64012 bool "Checkpoint/restore support" if EXPERT
64013+ depends on !GRKERNSEC
64014 default n
64015 help
64016 Enables additional kernel features in a sake of checkpoint/restore.
64017@@ -1240,7 +1241,7 @@ config SLUB_DEBUG
64018
64019 config COMPAT_BRK
64020 bool "Disable heap randomization"
64021- default y
64022+ default n
64023 help
64024 Randomizing heap placement makes heap exploits harder, but it
64025 also breaks ancient binaries (including anything libc5 based).
64026diff --git a/init/do_mounts.c b/init/do_mounts.c
64027index 42b0707..c06eef4 100644
64028--- a/init/do_mounts.c
64029+++ b/init/do_mounts.c
64030@@ -326,11 +326,11 @@ static void __init get_fs_names(char *page)
64031 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
64032 {
64033 struct super_block *s;
64034- int err = sys_mount(name, "/root", fs, flags, data);
64035+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
64036 if (err)
64037 return err;
64038
64039- sys_chdir((const char __user __force *)"/root");
64040+ sys_chdir((const char __force_user *)"/root");
64041 s = current->fs->pwd.dentry->d_sb;
64042 ROOT_DEV = s->s_dev;
64043 printk(KERN_INFO
64044@@ -450,18 +450,18 @@ void __init change_floppy(char *fmt, ...)
64045 va_start(args, fmt);
64046 vsprintf(buf, fmt, args);
64047 va_end(args);
64048- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
64049+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
64050 if (fd >= 0) {
64051 sys_ioctl(fd, FDEJECT, 0);
64052 sys_close(fd);
64053 }
64054 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
64055- fd = sys_open("/dev/console", O_RDWR, 0);
64056+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
64057 if (fd >= 0) {
64058 sys_ioctl(fd, TCGETS, (long)&termios);
64059 termios.c_lflag &= ~ICANON;
64060 sys_ioctl(fd, TCSETSF, (long)&termios);
64061- sys_read(fd, &c, 1);
64062+ sys_read(fd, (char __user *)&c, 1);
64063 termios.c_lflag |= ICANON;
64064 sys_ioctl(fd, TCSETSF, (long)&termios);
64065 sys_close(fd);
64066@@ -555,6 +555,6 @@ void __init prepare_namespace(void)
64067 mount_root();
64068 out:
64069 devtmpfs_mount("dev");
64070- sys_mount(".", "/", NULL, MS_MOVE, NULL);
64071- sys_chroot((const char __user __force *)".");
64072+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64073+ sys_chroot((const char __force_user *)".");
64074 }
64075diff --git a/init/do_mounts.h b/init/do_mounts.h
64076index f5b978a..69dbfe8 100644
64077--- a/init/do_mounts.h
64078+++ b/init/do_mounts.h
64079@@ -15,15 +15,15 @@ extern int root_mountflags;
64080
64081 static inline int create_dev(char *name, dev_t dev)
64082 {
64083- sys_unlink(name);
64084- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
64085+ sys_unlink((char __force_user *)name);
64086+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
64087 }
64088
64089 #if BITS_PER_LONG == 32
64090 static inline u32 bstat(char *name)
64091 {
64092 struct stat64 stat;
64093- if (sys_stat64(name, &stat) != 0)
64094+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
64095 return 0;
64096 if (!S_ISBLK(stat.st_mode))
64097 return 0;
64098@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
64099 static inline u32 bstat(char *name)
64100 {
64101 struct stat stat;
64102- if (sys_newstat(name, &stat) != 0)
64103+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
64104 return 0;
64105 if (!S_ISBLK(stat.st_mode))
64106 return 0;
64107diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
64108index 9047330..de0d1fb 100644
64109--- a/init/do_mounts_initrd.c
64110+++ b/init/do_mounts_initrd.c
64111@@ -43,13 +43,13 @@ static void __init handle_initrd(void)
64112 create_dev("/dev/root.old", Root_RAM0);
64113 /* mount initrd on rootfs' /root */
64114 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
64115- sys_mkdir("/old", 0700);
64116- root_fd = sys_open("/", 0, 0);
64117- old_fd = sys_open("/old", 0, 0);
64118+ sys_mkdir((const char __force_user *)"/old", 0700);
64119+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
64120+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
64121 /* move initrd over / and chdir/chroot in initrd root */
64122- sys_chdir("/root");
64123- sys_mount(".", "/", NULL, MS_MOVE, NULL);
64124- sys_chroot(".");
64125+ sys_chdir((const char __force_user *)"/root");
64126+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64127+ sys_chroot((const char __force_user *)".");
64128
64129 /*
64130 * In case that a resume from disk is carried out by linuxrc or one of
64131@@ -66,15 +66,15 @@ static void __init handle_initrd(void)
64132
64133 /* move initrd to rootfs' /old */
64134 sys_fchdir(old_fd);
64135- sys_mount("/", ".", NULL, MS_MOVE, NULL);
64136+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
64137 /* switch root and cwd back to / of rootfs */
64138 sys_fchdir(root_fd);
64139- sys_chroot(".");
64140+ sys_chroot((const char __force_user *)".");
64141 sys_close(old_fd);
64142 sys_close(root_fd);
64143
64144 if (new_decode_dev(real_root_dev) == Root_RAM0) {
64145- sys_chdir("/old");
64146+ sys_chdir((const char __force_user *)"/old");
64147 return;
64148 }
64149
64150@@ -82,17 +82,17 @@ static void __init handle_initrd(void)
64151 mount_root();
64152
64153 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
64154- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
64155+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
64156 if (!error)
64157 printk("okay\n");
64158 else {
64159- int fd = sys_open("/dev/root.old", O_RDWR, 0);
64160+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
64161 if (error == -ENOENT)
64162 printk("/initrd does not exist. Ignored.\n");
64163 else
64164 printk("failed\n");
64165 printk(KERN_NOTICE "Unmounting old root\n");
64166- sys_umount("/old", MNT_DETACH);
64167+ sys_umount((char __force_user *)"/old", MNT_DETACH);
64168 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
64169 if (fd < 0) {
64170 error = fd;
64171@@ -115,11 +115,11 @@ int __init initrd_load(void)
64172 * mounted in the normal path.
64173 */
64174 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
64175- sys_unlink("/initrd.image");
64176+ sys_unlink((const char __force_user *)"/initrd.image");
64177 handle_initrd();
64178 return 1;
64179 }
64180 }
64181- sys_unlink("/initrd.image");
64182+ sys_unlink((const char __force_user *)"/initrd.image");
64183 return 0;
64184 }
64185diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
64186index 32c4799..c27ee74 100644
64187--- a/init/do_mounts_md.c
64188+++ b/init/do_mounts_md.c
64189@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
64190 partitioned ? "_d" : "", minor,
64191 md_setup_args[ent].device_names);
64192
64193- fd = sys_open(name, 0, 0);
64194+ fd = sys_open((char __force_user *)name, 0, 0);
64195 if (fd < 0) {
64196 printk(KERN_ERR "md: open failed - cannot start "
64197 "array %s\n", name);
64198@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
64199 * array without it
64200 */
64201 sys_close(fd);
64202- fd = sys_open(name, 0, 0);
64203+ fd = sys_open((char __force_user *)name, 0, 0);
64204 sys_ioctl(fd, BLKRRPART, 0);
64205 }
64206 sys_close(fd);
64207@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
64208
64209 wait_for_device_probe();
64210
64211- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
64212+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
64213 if (fd >= 0) {
64214 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
64215 sys_close(fd);
64216diff --git a/init/initramfs.c b/init/initramfs.c
64217index 8216c30..25e8e32 100644
64218--- a/init/initramfs.c
64219+++ b/init/initramfs.c
64220@@ -74,7 +74,7 @@ static void __init free_hash(void)
64221 }
64222 }
64223
64224-static long __init do_utime(char __user *filename, time_t mtime)
64225+static long __init do_utime(__force char __user *filename, time_t mtime)
64226 {
64227 struct timespec t[2];
64228
64229@@ -109,7 +109,7 @@ static void __init dir_utime(void)
64230 struct dir_entry *de, *tmp;
64231 list_for_each_entry_safe(de, tmp, &dir_list, list) {
64232 list_del(&de->list);
64233- do_utime(de->name, de->mtime);
64234+ do_utime((char __force_user *)de->name, de->mtime);
64235 kfree(de->name);
64236 kfree(de);
64237 }
64238@@ -271,7 +271,7 @@ static int __init maybe_link(void)
64239 if (nlink >= 2) {
64240 char *old = find_link(major, minor, ino, mode, collected);
64241 if (old)
64242- return (sys_link(old, collected) < 0) ? -1 : 1;
64243+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
64244 }
64245 return 0;
64246 }
64247@@ -280,11 +280,11 @@ static void __init clean_path(char *path, umode_t mode)
64248 {
64249 struct stat st;
64250
64251- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
64252+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
64253 if (S_ISDIR(st.st_mode))
64254- sys_rmdir(path);
64255+ sys_rmdir((char __force_user *)path);
64256 else
64257- sys_unlink(path);
64258+ sys_unlink((char __force_user *)path);
64259 }
64260 }
64261
64262@@ -305,7 +305,7 @@ static int __init do_name(void)
64263 int openflags = O_WRONLY|O_CREAT;
64264 if (ml != 1)
64265 openflags |= O_TRUNC;
64266- wfd = sys_open(collected, openflags, mode);
64267+ wfd = sys_open((char __force_user *)collected, openflags, mode);
64268
64269 if (wfd >= 0) {
64270 sys_fchown(wfd, uid, gid);
64271@@ -317,17 +317,17 @@ static int __init do_name(void)
64272 }
64273 }
64274 } else if (S_ISDIR(mode)) {
64275- sys_mkdir(collected, mode);
64276- sys_chown(collected, uid, gid);
64277- sys_chmod(collected, mode);
64278+ sys_mkdir((char __force_user *)collected, mode);
64279+ sys_chown((char __force_user *)collected, uid, gid);
64280+ sys_chmod((char __force_user *)collected, mode);
64281 dir_add(collected, mtime);
64282 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
64283 S_ISFIFO(mode) || S_ISSOCK(mode)) {
64284 if (maybe_link() == 0) {
64285- sys_mknod(collected, mode, rdev);
64286- sys_chown(collected, uid, gid);
64287- sys_chmod(collected, mode);
64288- do_utime(collected, mtime);
64289+ sys_mknod((char __force_user *)collected, mode, rdev);
64290+ sys_chown((char __force_user *)collected, uid, gid);
64291+ sys_chmod((char __force_user *)collected, mode);
64292+ do_utime((char __force_user *)collected, mtime);
64293 }
64294 }
64295 return 0;
64296@@ -336,15 +336,15 @@ static int __init do_name(void)
64297 static int __init do_copy(void)
64298 {
64299 if (count >= body_len) {
64300- sys_write(wfd, victim, body_len);
64301+ sys_write(wfd, (char __force_user *)victim, body_len);
64302 sys_close(wfd);
64303- do_utime(vcollected, mtime);
64304+ do_utime((char __force_user *)vcollected, mtime);
64305 kfree(vcollected);
64306 eat(body_len);
64307 state = SkipIt;
64308 return 0;
64309 } else {
64310- sys_write(wfd, victim, count);
64311+ sys_write(wfd, (char __force_user *)victim, count);
64312 body_len -= count;
64313 eat(count);
64314 return 1;
64315@@ -355,9 +355,9 @@ static int __init do_symlink(void)
64316 {
64317 collected[N_ALIGN(name_len) + body_len] = '\0';
64318 clean_path(collected, 0);
64319- sys_symlink(collected + N_ALIGN(name_len), collected);
64320- sys_lchown(collected, uid, gid);
64321- do_utime(collected, mtime);
64322+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
64323+ sys_lchown((char __force_user *)collected, uid, gid);
64324+ do_utime((char __force_user *)collected, mtime);
64325 state = SkipIt;
64326 next_state = Reset;
64327 return 0;
64328diff --git a/init/main.c b/init/main.c
64329index b08c5f7..09f865e 100644
64330--- a/init/main.c
64331+++ b/init/main.c
64332@@ -95,6 +95,8 @@ static inline void mark_rodata_ro(void) { }
64333 extern void tc_init(void);
64334 #endif
64335
64336+extern void grsecurity_init(void);
64337+
64338 /*
64339 * Debug helper: via this flag we know that we are in 'early bootup code'
64340 * where only the boot processor is running with IRQ disabled. This means
64341@@ -148,6 +150,49 @@ static int __init set_reset_devices(char *str)
64342
64343 __setup("reset_devices", set_reset_devices);
64344
64345+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
64346+extern char pax_enter_kernel_user[];
64347+extern char pax_exit_kernel_user[];
64348+extern pgdval_t clone_pgd_mask;
64349+#endif
64350+
64351+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
64352+static int __init setup_pax_nouderef(char *str)
64353+{
64354+#ifdef CONFIG_X86_32
64355+ unsigned int cpu;
64356+ struct desc_struct *gdt;
64357+
64358+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
64359+ gdt = get_cpu_gdt_table(cpu);
64360+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
64361+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
64362+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
64363+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
64364+ }
64365+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
64366+#else
64367+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
64368+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
64369+ clone_pgd_mask = ~(pgdval_t)0UL;
64370+#endif
64371+
64372+ return 0;
64373+}
64374+early_param("pax_nouderef", setup_pax_nouderef);
64375+#endif
64376+
64377+#ifdef CONFIG_PAX_SOFTMODE
64378+int pax_softmode;
64379+
64380+static int __init setup_pax_softmode(char *str)
64381+{
64382+ get_option(&str, &pax_softmode);
64383+ return 1;
64384+}
64385+__setup("pax_softmode=", setup_pax_softmode);
64386+#endif
64387+
64388 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
64389 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
64390 static const char *panic_later, *panic_param;
64391@@ -674,6 +719,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
64392 {
64393 int count = preempt_count();
64394 int ret;
64395+ const char *msg1 = "", *msg2 = "";
64396
64397 if (initcall_debug)
64398 ret = do_one_initcall_debug(fn);
64399@@ -686,15 +732,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
64400 sprintf(msgbuf, "error code %d ", ret);
64401
64402 if (preempt_count() != count) {
64403- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
64404+ msg1 = " preemption imbalance";
64405 preempt_count() = count;
64406 }
64407 if (irqs_disabled()) {
64408- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
64409+ msg2 = " disabled interrupts";
64410 local_irq_enable();
64411 }
64412- if (msgbuf[0]) {
64413- printk("initcall %pF returned with %s\n", fn, msgbuf);
64414+ if (msgbuf[0] || *msg1 || *msg2) {
64415+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
64416 }
64417
64418 return ret;
64419@@ -865,7 +911,7 @@ static int __init kernel_init(void * unused)
64420 do_basic_setup();
64421
64422 /* Open the /dev/console on the rootfs, this should never fail */
64423- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
64424+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
64425 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
64426
64427 (void) sys_dup(0);
64428@@ -878,11 +924,13 @@ static int __init kernel_init(void * unused)
64429 if (!ramdisk_execute_command)
64430 ramdisk_execute_command = "/init";
64431
64432- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
64433+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
64434 ramdisk_execute_command = NULL;
64435 prepare_namespace();
64436 }
64437
64438+ grsecurity_init();
64439+
64440 /*
64441 * Ok, we have completed the initial bootup, and
64442 * we're essentially up and running. Get rid of the
64443diff --git a/ipc/mqueue.c b/ipc/mqueue.c
64444index 28bd64d..c66b72a 100644
64445--- a/ipc/mqueue.c
64446+++ b/ipc/mqueue.c
64447@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
64448 mq_bytes = (mq_msg_tblsz +
64449 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
64450
64451+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
64452 spin_lock(&mq_lock);
64453 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
64454 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
64455diff --git a/ipc/msg.c b/ipc/msg.c
64456index 7385de2..a8180e08 100644
64457--- a/ipc/msg.c
64458+++ b/ipc/msg.c
64459@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
64460 return security_msg_queue_associate(msq, msgflg);
64461 }
64462
64463+static struct ipc_ops msg_ops = {
64464+ .getnew = newque,
64465+ .associate = msg_security,
64466+ .more_checks = NULL
64467+};
64468+
64469 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
64470 {
64471 struct ipc_namespace *ns;
64472- struct ipc_ops msg_ops;
64473 struct ipc_params msg_params;
64474
64475 ns = current->nsproxy->ipc_ns;
64476
64477- msg_ops.getnew = newque;
64478- msg_ops.associate = msg_security;
64479- msg_ops.more_checks = NULL;
64480-
64481 msg_params.key = key;
64482 msg_params.flg = msgflg;
64483
64484diff --git a/ipc/sem.c b/ipc/sem.c
64485index 5215a81..cfc0cac 100644
64486--- a/ipc/sem.c
64487+++ b/ipc/sem.c
64488@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
64489 return 0;
64490 }
64491
64492+static struct ipc_ops sem_ops = {
64493+ .getnew = newary,
64494+ .associate = sem_security,
64495+ .more_checks = sem_more_checks
64496+};
64497+
64498 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64499 {
64500 struct ipc_namespace *ns;
64501- struct ipc_ops sem_ops;
64502 struct ipc_params sem_params;
64503
64504 ns = current->nsproxy->ipc_ns;
64505@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64506 if (nsems < 0 || nsems > ns->sc_semmsl)
64507 return -EINVAL;
64508
64509- sem_ops.getnew = newary;
64510- sem_ops.associate = sem_security;
64511- sem_ops.more_checks = sem_more_checks;
64512-
64513 sem_params.key = key;
64514 sem_params.flg = semflg;
64515 sem_params.u.nsems = nsems;
64516diff --git a/ipc/shm.c b/ipc/shm.c
64517index 406c5b2..bc66d67 100644
64518--- a/ipc/shm.c
64519+++ b/ipc/shm.c
64520@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
64521 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
64522 #endif
64523
64524+#ifdef CONFIG_GRKERNSEC
64525+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64526+ const time_t shm_createtime, const uid_t cuid,
64527+ const int shmid);
64528+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64529+ const time_t shm_createtime);
64530+#endif
64531+
64532 void shm_init_ns(struct ipc_namespace *ns)
64533 {
64534 ns->shm_ctlmax = SHMMAX;
64535@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
64536 shp->shm_lprid = 0;
64537 shp->shm_atim = shp->shm_dtim = 0;
64538 shp->shm_ctim = get_seconds();
64539+#ifdef CONFIG_GRKERNSEC
64540+ {
64541+ struct timespec timeval;
64542+ do_posix_clock_monotonic_gettime(&timeval);
64543+
64544+ shp->shm_createtime = timeval.tv_sec;
64545+ }
64546+#endif
64547 shp->shm_segsz = size;
64548 shp->shm_nattch = 0;
64549 shp->shm_file = file;
64550@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
64551 return 0;
64552 }
64553
64554+static struct ipc_ops shm_ops = {
64555+ .getnew = newseg,
64556+ .associate = shm_security,
64557+ .more_checks = shm_more_checks
64558+};
64559+
64560 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
64561 {
64562 struct ipc_namespace *ns;
64563- struct ipc_ops shm_ops;
64564 struct ipc_params shm_params;
64565
64566 ns = current->nsproxy->ipc_ns;
64567
64568- shm_ops.getnew = newseg;
64569- shm_ops.associate = shm_security;
64570- shm_ops.more_checks = shm_more_checks;
64571-
64572 shm_params.key = key;
64573 shm_params.flg = shmflg;
64574 shm_params.u.size = size;
64575@@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64576 f_mode = FMODE_READ | FMODE_WRITE;
64577 }
64578 if (shmflg & SHM_EXEC) {
64579+
64580+#ifdef CONFIG_PAX_MPROTECT
64581+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
64582+ goto out;
64583+#endif
64584+
64585 prot |= PROT_EXEC;
64586 acc_mode |= S_IXUGO;
64587 }
64588@@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64589 if (err)
64590 goto out_unlock;
64591
64592+#ifdef CONFIG_GRKERNSEC
64593+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
64594+ shp->shm_perm.cuid, shmid) ||
64595+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
64596+ err = -EACCES;
64597+ goto out_unlock;
64598+ }
64599+#endif
64600+
64601 path = shp->shm_file->f_path;
64602 path_get(&path);
64603 shp->shm_nattch++;
64604+#ifdef CONFIG_GRKERNSEC
64605+ shp->shm_lapid = current->pid;
64606+#endif
64607 size = i_size_read(path.dentry->d_inode);
64608 shm_unlock(shp);
64609
64610diff --git a/kernel/acct.c b/kernel/acct.c
64611index 02e6167..54824f7 100644
64612--- a/kernel/acct.c
64613+++ b/kernel/acct.c
64614@@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
64615 */
64616 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
64617 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
64618- file->f_op->write(file, (char *)&ac,
64619+ file->f_op->write(file, (char __force_user *)&ac,
64620 sizeof(acct_t), &file->f_pos);
64621 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
64622 set_fs(fs);
64623diff --git a/kernel/audit.c b/kernel/audit.c
64624index 1c7f2c6..9ba5359 100644
64625--- a/kernel/audit.c
64626+++ b/kernel/audit.c
64627@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
64628 3) suppressed due to audit_rate_limit
64629 4) suppressed due to audit_backlog_limit
64630 */
64631-static atomic_t audit_lost = ATOMIC_INIT(0);
64632+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
64633
64634 /* The netlink socket. */
64635 static struct sock *audit_sock;
64636@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
64637 unsigned long now;
64638 int print;
64639
64640- atomic_inc(&audit_lost);
64641+ atomic_inc_unchecked(&audit_lost);
64642
64643 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
64644
64645@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
64646 printk(KERN_WARNING
64647 "audit: audit_lost=%d audit_rate_limit=%d "
64648 "audit_backlog_limit=%d\n",
64649- atomic_read(&audit_lost),
64650+ atomic_read_unchecked(&audit_lost),
64651 audit_rate_limit,
64652 audit_backlog_limit);
64653 audit_panic(message);
64654@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
64655 status_set.pid = audit_pid;
64656 status_set.rate_limit = audit_rate_limit;
64657 status_set.backlog_limit = audit_backlog_limit;
64658- status_set.lost = atomic_read(&audit_lost);
64659+ status_set.lost = atomic_read_unchecked(&audit_lost);
64660 status_set.backlog = skb_queue_len(&audit_skb_queue);
64661 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
64662 &status_set, sizeof(status_set));
64663diff --git a/kernel/auditsc.c b/kernel/auditsc.c
64664index af1de0f..06dfe57 100644
64665--- a/kernel/auditsc.c
64666+++ b/kernel/auditsc.c
64667@@ -2288,7 +2288,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
64668 }
64669
64670 /* global counter which is incremented every time something logs in */
64671-static atomic_t session_id = ATOMIC_INIT(0);
64672+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
64673
64674 /**
64675 * audit_set_loginuid - set current task's audit_context loginuid
64676@@ -2312,7 +2312,7 @@ int audit_set_loginuid(uid_t loginuid)
64677 return -EPERM;
64678 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
64679
64680- sessionid = atomic_inc_return(&session_id);
64681+ sessionid = atomic_inc_return_unchecked(&session_id);
64682 if (context && context->in_syscall) {
64683 struct audit_buffer *ab;
64684
64685diff --git a/kernel/capability.c b/kernel/capability.c
64686index 3f1adb6..c564db0 100644
64687--- a/kernel/capability.c
64688+++ b/kernel/capability.c
64689@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
64690 * before modification is attempted and the application
64691 * fails.
64692 */
64693+ if (tocopy > ARRAY_SIZE(kdata))
64694+ return -EFAULT;
64695+
64696 if (copy_to_user(dataptr, kdata, tocopy
64697 * sizeof(struct __user_cap_data_struct))) {
64698 return -EFAULT;
64699@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
64700 int ret;
64701
64702 rcu_read_lock();
64703- ret = security_capable(__task_cred(t), ns, cap);
64704+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
64705+ gr_task_is_capable(t, __task_cred(t), cap);
64706 rcu_read_unlock();
64707
64708- return (ret == 0);
64709+ return ret;
64710 }
64711
64712 /**
64713@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
64714 int ret;
64715
64716 rcu_read_lock();
64717- ret = security_capable_noaudit(__task_cred(t), ns, cap);
64718+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
64719 rcu_read_unlock();
64720
64721- return (ret == 0);
64722+ return ret;
64723 }
64724
64725 /**
64726@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
64727 BUG();
64728 }
64729
64730- if (security_capable(current_cred(), ns, cap) == 0) {
64731+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
64732 current->flags |= PF_SUPERPRIV;
64733 return true;
64734 }
64735@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
64736 }
64737 EXPORT_SYMBOL(ns_capable);
64738
64739+bool ns_capable_nolog(struct user_namespace *ns, int cap)
64740+{
64741+ if (unlikely(!cap_valid(cap))) {
64742+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
64743+ BUG();
64744+ }
64745+
64746+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
64747+ current->flags |= PF_SUPERPRIV;
64748+ return true;
64749+ }
64750+ return false;
64751+}
64752+EXPORT_SYMBOL(ns_capable_nolog);
64753+
64754 /**
64755 * capable - Determine if the current task has a superior capability in effect
64756 * @cap: The capability to be tested for
64757@@ -408,6 +427,12 @@ bool capable(int cap)
64758 }
64759 EXPORT_SYMBOL(capable);
64760
64761+bool capable_nolog(int cap)
64762+{
64763+ return ns_capable_nolog(&init_user_ns, cap);
64764+}
64765+EXPORT_SYMBOL(capable_nolog);
64766+
64767 /**
64768 * nsown_capable - Check superior capability to one's own user_ns
64769 * @cap: The capability in question
64770diff --git a/kernel/compat.c b/kernel/compat.c
64771index d2c67aa..a629b2e 100644
64772--- a/kernel/compat.c
64773+++ b/kernel/compat.c
64774@@ -13,6 +13,7 @@
64775
64776 #include <linux/linkage.h>
64777 #include <linux/compat.h>
64778+#include <linux/module.h>
64779 #include <linux/errno.h>
64780 #include <linux/time.h>
64781 #include <linux/signal.h>
64782@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
64783 mm_segment_t oldfs;
64784 long ret;
64785
64786- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
64787+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
64788 oldfs = get_fs();
64789 set_fs(KERNEL_DS);
64790 ret = hrtimer_nanosleep_restart(restart);
64791@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
64792 oldfs = get_fs();
64793 set_fs(KERNEL_DS);
64794 ret = hrtimer_nanosleep(&tu,
64795- rmtp ? (struct timespec __user *)&rmt : NULL,
64796+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
64797 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
64798 set_fs(oldfs);
64799
64800@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
64801 mm_segment_t old_fs = get_fs();
64802
64803 set_fs(KERNEL_DS);
64804- ret = sys_sigpending((old_sigset_t __user *) &s);
64805+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
64806 set_fs(old_fs);
64807 if (ret == 0)
64808 ret = put_user(s, set);
64809@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
64810 mm_segment_t old_fs = get_fs();
64811
64812 set_fs(KERNEL_DS);
64813- ret = sys_old_getrlimit(resource, &r);
64814+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
64815 set_fs(old_fs);
64816
64817 if (!ret) {
64818@@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
64819 mm_segment_t old_fs = get_fs();
64820
64821 set_fs(KERNEL_DS);
64822- ret = sys_getrusage(who, (struct rusage __user *) &r);
64823+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
64824 set_fs(old_fs);
64825
64826 if (ret)
64827@@ -550,8 +551,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
64828 set_fs (KERNEL_DS);
64829 ret = sys_wait4(pid,
64830 (stat_addr ?
64831- (unsigned int __user *) &status : NULL),
64832- options, (struct rusage __user *) &r);
64833+ (unsigned int __force_user *) &status : NULL),
64834+ options, (struct rusage __force_user *) &r);
64835 set_fs (old_fs);
64836
64837 if (ret > 0) {
64838@@ -576,8 +577,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
64839 memset(&info, 0, sizeof(info));
64840
64841 set_fs(KERNEL_DS);
64842- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
64843- uru ? (struct rusage __user *)&ru : NULL);
64844+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
64845+ uru ? (struct rusage __force_user *)&ru : NULL);
64846 set_fs(old_fs);
64847
64848 if ((ret < 0) || (info.si_signo == 0))
64849@@ -707,8 +708,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
64850 oldfs = get_fs();
64851 set_fs(KERNEL_DS);
64852 err = sys_timer_settime(timer_id, flags,
64853- (struct itimerspec __user *) &newts,
64854- (struct itimerspec __user *) &oldts);
64855+ (struct itimerspec __force_user *) &newts,
64856+ (struct itimerspec __force_user *) &oldts);
64857 set_fs(oldfs);
64858 if (!err && old && put_compat_itimerspec(old, &oldts))
64859 return -EFAULT;
64860@@ -725,7 +726,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
64861 oldfs = get_fs();
64862 set_fs(KERNEL_DS);
64863 err = sys_timer_gettime(timer_id,
64864- (struct itimerspec __user *) &ts);
64865+ (struct itimerspec __force_user *) &ts);
64866 set_fs(oldfs);
64867 if (!err && put_compat_itimerspec(setting, &ts))
64868 return -EFAULT;
64869@@ -744,7 +745,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
64870 oldfs = get_fs();
64871 set_fs(KERNEL_DS);
64872 err = sys_clock_settime(which_clock,
64873- (struct timespec __user *) &ts);
64874+ (struct timespec __force_user *) &ts);
64875 set_fs(oldfs);
64876 return err;
64877 }
64878@@ -759,7 +760,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
64879 oldfs = get_fs();
64880 set_fs(KERNEL_DS);
64881 err = sys_clock_gettime(which_clock,
64882- (struct timespec __user *) &ts);
64883+ (struct timespec __force_user *) &ts);
64884 set_fs(oldfs);
64885 if (!err && put_compat_timespec(&ts, tp))
64886 return -EFAULT;
64887@@ -779,7 +780,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
64888
64889 oldfs = get_fs();
64890 set_fs(KERNEL_DS);
64891- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
64892+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
64893 set_fs(oldfs);
64894
64895 err = compat_put_timex(utp, &txc);
64896@@ -799,7 +800,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
64897 oldfs = get_fs();
64898 set_fs(KERNEL_DS);
64899 err = sys_clock_getres(which_clock,
64900- (struct timespec __user *) &ts);
64901+ (struct timespec __force_user *) &ts);
64902 set_fs(oldfs);
64903 if (!err && tp && put_compat_timespec(&ts, tp))
64904 return -EFAULT;
64905@@ -811,9 +812,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
64906 long err;
64907 mm_segment_t oldfs;
64908 struct timespec tu;
64909- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
64910+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
64911
64912- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
64913+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
64914 oldfs = get_fs();
64915 set_fs(KERNEL_DS);
64916 err = clock_nanosleep_restart(restart);
64917@@ -845,8 +846,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
64918 oldfs = get_fs();
64919 set_fs(KERNEL_DS);
64920 err = sys_clock_nanosleep(which_clock, flags,
64921- (struct timespec __user *) &in,
64922- (struct timespec __user *) &out);
64923+ (struct timespec __force_user *) &in,
64924+ (struct timespec __force_user *) &out);
64925 set_fs(oldfs);
64926
64927 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
64928diff --git a/kernel/configs.c b/kernel/configs.c
64929index 42e8fa0..9e7406b 100644
64930--- a/kernel/configs.c
64931+++ b/kernel/configs.c
64932@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
64933 struct proc_dir_entry *entry;
64934
64935 /* create the current config file */
64936+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
64937+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
64938+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
64939+ &ikconfig_file_ops);
64940+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64941+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
64942+ &ikconfig_file_ops);
64943+#endif
64944+#else
64945 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
64946 &ikconfig_file_ops);
64947+#endif
64948+
64949 if (!entry)
64950 return -ENOMEM;
64951
64952diff --git a/kernel/cred.c b/kernel/cred.c
64953index e70683d..27761b6 100644
64954--- a/kernel/cred.c
64955+++ b/kernel/cred.c
64956@@ -205,6 +205,15 @@ void exit_creds(struct task_struct *tsk)
64957 validate_creds(cred);
64958 put_cred(cred);
64959 }
64960+
64961+#ifdef CONFIG_GRKERNSEC_SETXID
64962+ cred = (struct cred *) tsk->delayed_cred;
64963+ if (cred) {
64964+ tsk->delayed_cred = NULL;
64965+ validate_creds(cred);
64966+ put_cred(cred);
64967+ }
64968+#endif
64969 }
64970
64971 /**
64972@@ -473,7 +482,7 @@ error_put:
64973 * Always returns 0 thus allowing this function to be tail-called at the end
64974 * of, say, sys_setgid().
64975 */
64976-int commit_creds(struct cred *new)
64977+static int __commit_creds(struct cred *new)
64978 {
64979 struct task_struct *task = current;
64980 const struct cred *old = task->real_cred;
64981@@ -492,6 +501,8 @@ int commit_creds(struct cred *new)
64982
64983 get_cred(new); /* we will require a ref for the subj creds too */
64984
64985+ gr_set_role_label(task, new->uid, new->gid);
64986+
64987 /* dumpability changes */
64988 if (old->euid != new->euid ||
64989 old->egid != new->egid ||
64990@@ -541,6 +552,101 @@ int commit_creds(struct cred *new)
64991 put_cred(old);
64992 return 0;
64993 }
64994+#ifdef CONFIG_GRKERNSEC_SETXID
64995+extern int set_user(struct cred *new);
64996+
64997+void gr_delayed_cred_worker(void)
64998+{
64999+ const struct cred *new = current->delayed_cred;
65000+ struct cred *ncred;
65001+
65002+ current->delayed_cred = NULL;
65003+
65004+ if (current_uid() && new != NULL) {
65005+ // from doing get_cred on it when queueing this
65006+ put_cred(new);
65007+ return;
65008+ } else if (new == NULL)
65009+ return;
65010+
65011+ ncred = prepare_creds();
65012+ if (!ncred)
65013+ goto die;
65014+ // uids
65015+ ncred->uid = new->uid;
65016+ ncred->euid = new->euid;
65017+ ncred->suid = new->suid;
65018+ ncred->fsuid = new->fsuid;
65019+ // gids
65020+ ncred->gid = new->gid;
65021+ ncred->egid = new->egid;
65022+ ncred->sgid = new->sgid;
65023+ ncred->fsgid = new->fsgid;
65024+ // groups
65025+ if (set_groups(ncred, new->group_info) < 0) {
65026+ abort_creds(ncred);
65027+ goto die;
65028+ }
65029+ // caps
65030+ ncred->securebits = new->securebits;
65031+ ncred->cap_inheritable = new->cap_inheritable;
65032+ ncred->cap_permitted = new->cap_permitted;
65033+ ncred->cap_effective = new->cap_effective;
65034+ ncred->cap_bset = new->cap_bset;
65035+
65036+ if (set_user(ncred)) {
65037+ abort_creds(ncred);
65038+ goto die;
65039+ }
65040+
65041+ // from doing get_cred on it when queueing this
65042+ put_cred(new);
65043+
65044+ __commit_creds(ncred);
65045+ return;
65046+die:
65047+ // from doing get_cred on it when queueing this
65048+ put_cred(new);
65049+ do_group_exit(SIGKILL);
65050+}
65051+#endif
65052+
65053+int commit_creds(struct cred *new)
65054+{
65055+#ifdef CONFIG_GRKERNSEC_SETXID
65056+ int ret;
65057+ int schedule_it = 0;
65058+ struct task_struct *t;
65059+
65060+ /* we won't get called with tasklist_lock held for writing
65061+ and interrupts disabled as the cred struct in that case is
65062+ init_cred
65063+ */
65064+ if (grsec_enable_setxid && !current_is_single_threaded() &&
65065+ !current_uid() && new->uid) {
65066+ schedule_it = 1;
65067+ }
65068+ ret = __commit_creds(new);
65069+ if (schedule_it) {
65070+ rcu_read_lock();
65071+ read_lock(&tasklist_lock);
65072+ for (t = next_thread(current); t != current;
65073+ t = next_thread(t)) {
65074+ if (t->delayed_cred == NULL) {
65075+ t->delayed_cred = get_cred(new);
65076+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
65077+ set_tsk_need_resched(t);
65078+ }
65079+ }
65080+ read_unlock(&tasklist_lock);
65081+ rcu_read_unlock();
65082+ }
65083+ return ret;
65084+#else
65085+ return __commit_creds(new);
65086+#endif
65087+}
65088+
65089 EXPORT_SYMBOL(commit_creds);
65090
65091 /**
65092diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
65093index 0557f24..1a00d9a 100644
65094--- a/kernel/debug/debug_core.c
65095+++ b/kernel/debug/debug_core.c
65096@@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
65097 */
65098 static atomic_t masters_in_kgdb;
65099 static atomic_t slaves_in_kgdb;
65100-static atomic_t kgdb_break_tasklet_var;
65101+static atomic_unchecked_t kgdb_break_tasklet_var;
65102 atomic_t kgdb_setting_breakpoint;
65103
65104 struct task_struct *kgdb_usethread;
65105@@ -132,7 +132,7 @@ int kgdb_single_step;
65106 static pid_t kgdb_sstep_pid;
65107
65108 /* to keep track of the CPU which is doing the single stepping*/
65109-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65110+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65111
65112 /*
65113 * If you are debugging a problem where roundup (the collection of
65114@@ -540,7 +540,7 @@ return_normal:
65115 * kernel will only try for the value of sstep_tries before
65116 * giving up and continuing on.
65117 */
65118- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
65119+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
65120 (kgdb_info[cpu].task &&
65121 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
65122 atomic_set(&kgdb_active, -1);
65123@@ -634,8 +634,8 @@ cpu_master_loop:
65124 }
65125
65126 kgdb_restore:
65127- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
65128- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
65129+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
65130+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
65131 if (kgdb_info[sstep_cpu].task)
65132 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
65133 else
65134@@ -861,18 +861,18 @@ static void kgdb_unregister_callbacks(void)
65135 static void kgdb_tasklet_bpt(unsigned long ing)
65136 {
65137 kgdb_breakpoint();
65138- atomic_set(&kgdb_break_tasklet_var, 0);
65139+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
65140 }
65141
65142 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
65143
65144 void kgdb_schedule_breakpoint(void)
65145 {
65146- if (atomic_read(&kgdb_break_tasklet_var) ||
65147+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
65148 atomic_read(&kgdb_active) != -1 ||
65149 atomic_read(&kgdb_setting_breakpoint))
65150 return;
65151- atomic_inc(&kgdb_break_tasklet_var);
65152+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
65153 tasklet_schedule(&kgdb_tasklet_breakpoint);
65154 }
65155 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
65156diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
65157index 67b847d..93834dd 100644
65158--- a/kernel/debug/kdb/kdb_main.c
65159+++ b/kernel/debug/kdb/kdb_main.c
65160@@ -1983,7 +1983,7 @@ static int kdb_lsmod(int argc, const char **argv)
65161 list_for_each_entry(mod, kdb_modules, list) {
65162
65163 kdb_printf("%-20s%8u 0x%p ", mod->name,
65164- mod->core_size, (void *)mod);
65165+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
65166 #ifdef CONFIG_MODULE_UNLOAD
65167 kdb_printf("%4ld ", module_refcount(mod));
65168 #endif
65169@@ -1993,7 +1993,7 @@ static int kdb_lsmod(int argc, const char **argv)
65170 kdb_printf(" (Loading)");
65171 else
65172 kdb_printf(" (Live)");
65173- kdb_printf(" 0x%p", mod->module_core);
65174+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
65175
65176 #ifdef CONFIG_MODULE_UNLOAD
65177 {
65178diff --git a/kernel/events/core.c b/kernel/events/core.c
65179index fd126f8..70b755b 100644
65180--- a/kernel/events/core.c
65181+++ b/kernel/events/core.c
65182@@ -181,7 +181,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
65183 return 0;
65184 }
65185
65186-static atomic64_t perf_event_id;
65187+static atomic64_unchecked_t perf_event_id;
65188
65189 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
65190 enum event_type_t event_type);
65191@@ -2659,7 +2659,7 @@ static void __perf_event_read(void *info)
65192
65193 static inline u64 perf_event_count(struct perf_event *event)
65194 {
65195- return local64_read(&event->count) + atomic64_read(&event->child_count);
65196+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
65197 }
65198
65199 static u64 perf_event_read(struct perf_event *event)
65200@@ -2983,9 +2983,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
65201 mutex_lock(&event->child_mutex);
65202 total += perf_event_read(event);
65203 *enabled += event->total_time_enabled +
65204- atomic64_read(&event->child_total_time_enabled);
65205+ atomic64_read_unchecked(&event->child_total_time_enabled);
65206 *running += event->total_time_running +
65207- atomic64_read(&event->child_total_time_running);
65208+ atomic64_read_unchecked(&event->child_total_time_running);
65209
65210 list_for_each_entry(child, &event->child_list, child_list) {
65211 total += perf_event_read(child);
65212@@ -3393,10 +3393,10 @@ void perf_event_update_userpage(struct perf_event *event)
65213 userpg->offset -= local64_read(&event->hw.prev_count);
65214
65215 userpg->time_enabled = enabled +
65216- atomic64_read(&event->child_total_time_enabled);
65217+ atomic64_read_unchecked(&event->child_total_time_enabled);
65218
65219 userpg->time_running = running +
65220- atomic64_read(&event->child_total_time_running);
65221+ atomic64_read_unchecked(&event->child_total_time_running);
65222
65223 arch_perf_update_userpage(userpg, now);
65224
65225@@ -3829,11 +3829,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
65226 values[n++] = perf_event_count(event);
65227 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
65228 values[n++] = enabled +
65229- atomic64_read(&event->child_total_time_enabled);
65230+ atomic64_read_unchecked(&event->child_total_time_enabled);
65231 }
65232 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
65233 values[n++] = running +
65234- atomic64_read(&event->child_total_time_running);
65235+ atomic64_read_unchecked(&event->child_total_time_running);
65236 }
65237 if (read_format & PERF_FORMAT_ID)
65238 values[n++] = primary_event_id(event);
65239@@ -4511,12 +4511,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
65240 * need to add enough zero bytes after the string to handle
65241 * the 64bit alignment we do later.
65242 */
65243- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
65244+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
65245 if (!buf) {
65246 name = strncpy(tmp, "//enomem", sizeof(tmp));
65247 goto got_name;
65248 }
65249- name = d_path(&file->f_path, buf, PATH_MAX);
65250+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
65251 if (IS_ERR(name)) {
65252 name = strncpy(tmp, "//toolong", sizeof(tmp));
65253 goto got_name;
65254@@ -5929,7 +5929,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
65255 event->parent = parent_event;
65256
65257 event->ns = get_pid_ns(current->nsproxy->pid_ns);
65258- event->id = atomic64_inc_return(&perf_event_id);
65259+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
65260
65261 event->state = PERF_EVENT_STATE_INACTIVE;
65262
65263@@ -6491,10 +6491,10 @@ static void sync_child_event(struct perf_event *child_event,
65264 /*
65265 * Add back the child's count to the parent's count:
65266 */
65267- atomic64_add(child_val, &parent_event->child_count);
65268- atomic64_add(child_event->total_time_enabled,
65269+ atomic64_add_unchecked(child_val, &parent_event->child_count);
65270+ atomic64_add_unchecked(child_event->total_time_enabled,
65271 &parent_event->child_total_time_enabled);
65272- atomic64_add(child_event->total_time_running,
65273+ atomic64_add_unchecked(child_event->total_time_running,
65274 &parent_event->child_total_time_running);
65275
65276 /*
65277diff --git a/kernel/exit.c b/kernel/exit.c
65278index d8bd3b42..26bd8dc 100644
65279--- a/kernel/exit.c
65280+++ b/kernel/exit.c
65281@@ -59,6 +59,10 @@
65282 #include <asm/pgtable.h>
65283 #include <asm/mmu_context.h>
65284
65285+#ifdef CONFIG_GRKERNSEC
65286+extern rwlock_t grsec_exec_file_lock;
65287+#endif
65288+
65289 static void exit_mm(struct task_struct * tsk);
65290
65291 static void __unhash_process(struct task_struct *p, bool group_dead)
65292@@ -170,6 +174,10 @@ void release_task(struct task_struct * p)
65293 struct task_struct *leader;
65294 int zap_leader;
65295 repeat:
65296+#ifdef CONFIG_NET
65297+ gr_del_task_from_ip_table(p);
65298+#endif
65299+
65300 /* don't need to get the RCU readlock here - the process is dead and
65301 * can't be modifying its own credentials. But shut RCU-lockdep up */
65302 rcu_read_lock();
65303@@ -382,7 +390,7 @@ int allow_signal(int sig)
65304 * know it'll be handled, so that they don't get converted to
65305 * SIGKILL or just silently dropped.
65306 */
65307- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
65308+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
65309 recalc_sigpending();
65310 spin_unlock_irq(&current->sighand->siglock);
65311 return 0;
65312@@ -418,6 +426,17 @@ void daemonize(const char *name, ...)
65313 vsnprintf(current->comm, sizeof(current->comm), name, args);
65314 va_end(args);
65315
65316+#ifdef CONFIG_GRKERNSEC
65317+ write_lock(&grsec_exec_file_lock);
65318+ if (current->exec_file) {
65319+ fput(current->exec_file);
65320+ current->exec_file = NULL;
65321+ }
65322+ write_unlock(&grsec_exec_file_lock);
65323+#endif
65324+
65325+ gr_set_kernel_label(current);
65326+
65327 /*
65328 * If we were started as result of loading a module, close all of the
65329 * user space pages. We don't need them, and if we didn't close them
65330@@ -900,6 +919,8 @@ void do_exit(long code)
65331 struct task_struct *tsk = current;
65332 int group_dead;
65333
65334+ set_fs(USER_DS);
65335+
65336 profile_task_exit(tsk);
65337
65338 WARN_ON(blk_needs_flush_plug(tsk));
65339@@ -916,7 +937,6 @@ void do_exit(long code)
65340 * mm_release()->clear_child_tid() from writing to a user-controlled
65341 * kernel address.
65342 */
65343- set_fs(USER_DS);
65344
65345 ptrace_event(PTRACE_EVENT_EXIT, code);
65346
65347@@ -977,6 +997,9 @@ void do_exit(long code)
65348 tsk->exit_code = code;
65349 taskstats_exit(tsk, group_dead);
65350
65351+ gr_acl_handle_psacct(tsk, code);
65352+ gr_acl_handle_exit();
65353+
65354 exit_mm(tsk);
65355
65356 if (group_dead)
65357@@ -1093,7 +1116,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
65358 * Take down every thread in the group. This is called by fatal signals
65359 * as well as by sys_exit_group (below).
65360 */
65361-void
65362+__noreturn void
65363 do_group_exit(int exit_code)
65364 {
65365 struct signal_struct *sig = current->signal;
65366diff --git a/kernel/fork.c b/kernel/fork.c
65367index 8163333..efb4692 100644
65368--- a/kernel/fork.c
65369+++ b/kernel/fork.c
65370@@ -286,7 +286,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
65371 *stackend = STACK_END_MAGIC; /* for overflow detection */
65372
65373 #ifdef CONFIG_CC_STACKPROTECTOR
65374- tsk->stack_canary = get_random_int();
65375+ tsk->stack_canary = pax_get_random_long();
65376 #endif
65377
65378 /*
65379@@ -310,13 +310,78 @@ out:
65380 }
65381
65382 #ifdef CONFIG_MMU
65383+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
65384+{
65385+ struct vm_area_struct *tmp;
65386+ unsigned long charge;
65387+ struct mempolicy *pol;
65388+ struct file *file;
65389+
65390+ charge = 0;
65391+ if (mpnt->vm_flags & VM_ACCOUNT) {
65392+ unsigned long len;
65393+ len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65394+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
65395+ goto fail_nomem;
65396+ charge = len;
65397+ }
65398+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65399+ if (!tmp)
65400+ goto fail_nomem;
65401+ *tmp = *mpnt;
65402+ tmp->vm_mm = mm;
65403+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
65404+ pol = mpol_dup(vma_policy(mpnt));
65405+ if (IS_ERR(pol))
65406+ goto fail_nomem_policy;
65407+ vma_set_policy(tmp, pol);
65408+ if (anon_vma_fork(tmp, mpnt))
65409+ goto fail_nomem_anon_vma_fork;
65410+ tmp->vm_flags &= ~VM_LOCKED;
65411+ tmp->vm_next = tmp->vm_prev = NULL;
65412+ tmp->vm_mirror = NULL;
65413+ file = tmp->vm_file;
65414+ if (file) {
65415+ struct inode *inode = file->f_path.dentry->d_inode;
65416+ struct address_space *mapping = file->f_mapping;
65417+
65418+ get_file(file);
65419+ if (tmp->vm_flags & VM_DENYWRITE)
65420+ atomic_dec(&inode->i_writecount);
65421+ mutex_lock(&mapping->i_mmap_mutex);
65422+ if (tmp->vm_flags & VM_SHARED)
65423+ mapping->i_mmap_writable++;
65424+ flush_dcache_mmap_lock(mapping);
65425+ /* insert tmp into the share list, just after mpnt */
65426+ vma_prio_tree_add(tmp, mpnt);
65427+ flush_dcache_mmap_unlock(mapping);
65428+ mutex_unlock(&mapping->i_mmap_mutex);
65429+ }
65430+
65431+ /*
65432+ * Clear hugetlb-related page reserves for children. This only
65433+ * affects MAP_PRIVATE mappings. Faults generated by the child
65434+ * are not guaranteed to succeed, even if read-only
65435+ */
65436+ if (is_vm_hugetlb_page(tmp))
65437+ reset_vma_resv_huge_pages(tmp);
65438+
65439+ return tmp;
65440+
65441+fail_nomem_anon_vma_fork:
65442+ mpol_put(pol);
65443+fail_nomem_policy:
65444+ kmem_cache_free(vm_area_cachep, tmp);
65445+fail_nomem:
65446+ vm_unacct_memory(charge);
65447+ return NULL;
65448+}
65449+
65450 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65451 {
65452 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
65453 struct rb_node **rb_link, *rb_parent;
65454 int retval;
65455- unsigned long charge;
65456- struct mempolicy *pol;
65457
65458 down_write(&oldmm->mmap_sem);
65459 flush_cache_dup_mm(oldmm);
65460@@ -328,8 +393,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65461 mm->locked_vm = 0;
65462 mm->mmap = NULL;
65463 mm->mmap_cache = NULL;
65464- mm->free_area_cache = oldmm->mmap_base;
65465- mm->cached_hole_size = ~0UL;
65466+ mm->free_area_cache = oldmm->free_area_cache;
65467+ mm->cached_hole_size = oldmm->cached_hole_size;
65468 mm->map_count = 0;
65469 cpumask_clear(mm_cpumask(mm));
65470 mm->mm_rb = RB_ROOT;
65471@@ -345,8 +410,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65472
65473 prev = NULL;
65474 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
65475- struct file *file;
65476-
65477 if (mpnt->vm_flags & VM_DONTCOPY) {
65478 long pages = vma_pages(mpnt);
65479 mm->total_vm -= pages;
65480@@ -354,54 +417,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65481 -pages);
65482 continue;
65483 }
65484- charge = 0;
65485- if (mpnt->vm_flags & VM_ACCOUNT) {
65486- unsigned long len;
65487- len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65488- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
65489- goto fail_nomem;
65490- charge = len;
65491+ tmp = dup_vma(mm, oldmm, mpnt);
65492+ if (!tmp) {
65493+ retval = -ENOMEM;
65494+ goto out;
65495 }
65496- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65497- if (!tmp)
65498- goto fail_nomem;
65499- *tmp = *mpnt;
65500- INIT_LIST_HEAD(&tmp->anon_vma_chain);
65501- pol = mpol_dup(vma_policy(mpnt));
65502- retval = PTR_ERR(pol);
65503- if (IS_ERR(pol))
65504- goto fail_nomem_policy;
65505- vma_set_policy(tmp, pol);
65506- tmp->vm_mm = mm;
65507- if (anon_vma_fork(tmp, mpnt))
65508- goto fail_nomem_anon_vma_fork;
65509- tmp->vm_flags &= ~VM_LOCKED;
65510- tmp->vm_next = tmp->vm_prev = NULL;
65511- file = tmp->vm_file;
65512- if (file) {
65513- struct inode *inode = file->f_path.dentry->d_inode;
65514- struct address_space *mapping = file->f_mapping;
65515-
65516- get_file(file);
65517- if (tmp->vm_flags & VM_DENYWRITE)
65518- atomic_dec(&inode->i_writecount);
65519- mutex_lock(&mapping->i_mmap_mutex);
65520- if (tmp->vm_flags & VM_SHARED)
65521- mapping->i_mmap_writable++;
65522- flush_dcache_mmap_lock(mapping);
65523- /* insert tmp into the share list, just after mpnt */
65524- vma_prio_tree_add(tmp, mpnt);
65525- flush_dcache_mmap_unlock(mapping);
65526- mutex_unlock(&mapping->i_mmap_mutex);
65527- }
65528-
65529- /*
65530- * Clear hugetlb-related page reserves for children. This only
65531- * affects MAP_PRIVATE mappings. Faults generated by the child
65532- * are not guaranteed to succeed, even if read-only
65533- */
65534- if (is_vm_hugetlb_page(tmp))
65535- reset_vma_resv_huge_pages(tmp);
65536
65537 /*
65538 * Link in the new vma and copy the page table entries.
65539@@ -424,6 +444,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65540 if (retval)
65541 goto out;
65542 }
65543+
65544+#ifdef CONFIG_PAX_SEGMEXEC
65545+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
65546+ struct vm_area_struct *mpnt_m;
65547+
65548+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
65549+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
65550+
65551+ if (!mpnt->vm_mirror)
65552+ continue;
65553+
65554+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
65555+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
65556+ mpnt->vm_mirror = mpnt_m;
65557+ } else {
65558+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
65559+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
65560+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
65561+ mpnt->vm_mirror->vm_mirror = mpnt;
65562+ }
65563+ }
65564+ BUG_ON(mpnt_m);
65565+ }
65566+#endif
65567+
65568 /* a new mm has just been created */
65569 arch_dup_mmap(oldmm, mm);
65570 retval = 0;
65571@@ -432,14 +477,6 @@ out:
65572 flush_tlb_mm(oldmm);
65573 up_write(&oldmm->mmap_sem);
65574 return retval;
65575-fail_nomem_anon_vma_fork:
65576- mpol_put(pol);
65577-fail_nomem_policy:
65578- kmem_cache_free(vm_area_cachep, tmp);
65579-fail_nomem:
65580- retval = -ENOMEM;
65581- vm_unacct_memory(charge);
65582- goto out;
65583 }
65584
65585 static inline int mm_alloc_pgd(struct mm_struct *mm)
65586@@ -676,8 +713,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
65587 return ERR_PTR(err);
65588
65589 mm = get_task_mm(task);
65590- if (mm && mm != current->mm &&
65591- !ptrace_may_access(task, mode)) {
65592+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
65593+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
65594 mmput(mm);
65595 mm = ERR_PTR(-EACCES);
65596 }
65597@@ -899,13 +936,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
65598 spin_unlock(&fs->lock);
65599 return -EAGAIN;
65600 }
65601- fs->users++;
65602+ atomic_inc(&fs->users);
65603 spin_unlock(&fs->lock);
65604 return 0;
65605 }
65606 tsk->fs = copy_fs_struct(fs);
65607 if (!tsk->fs)
65608 return -ENOMEM;
65609+ gr_set_chroot_entries(tsk, &tsk->fs->root);
65610 return 0;
65611 }
65612
65613@@ -1172,6 +1210,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65614 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
65615 #endif
65616 retval = -EAGAIN;
65617+
65618+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
65619+
65620 if (atomic_read(&p->real_cred->user->processes) >=
65621 task_rlimit(p, RLIMIT_NPROC)) {
65622 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
65623@@ -1328,6 +1369,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65624 if (clone_flags & CLONE_THREAD)
65625 p->tgid = current->tgid;
65626
65627+ gr_copy_label(p);
65628+
65629 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
65630 /*
65631 * Clear TID on mm_release()?
65632@@ -1502,6 +1545,8 @@ bad_fork_cleanup_count:
65633 bad_fork_free:
65634 free_task(p);
65635 fork_out:
65636+ gr_log_forkfail(retval);
65637+
65638 return ERR_PTR(retval);
65639 }
65640
65641@@ -1602,6 +1647,8 @@ long do_fork(unsigned long clone_flags,
65642 if (clone_flags & CLONE_PARENT_SETTID)
65643 put_user(nr, parent_tidptr);
65644
65645+ gr_handle_brute_check();
65646+
65647 if (clone_flags & CLONE_VFORK) {
65648 p->vfork_done = &vfork;
65649 init_completion(&vfork);
65650@@ -1700,7 +1747,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
65651 return 0;
65652
65653 /* don't need lock here; in the worst case we'll do useless copy */
65654- if (fs->users == 1)
65655+ if (atomic_read(&fs->users) == 1)
65656 return 0;
65657
65658 *new_fsp = copy_fs_struct(fs);
65659@@ -1789,7 +1836,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
65660 fs = current->fs;
65661 spin_lock(&fs->lock);
65662 current->fs = new_fs;
65663- if (--fs->users)
65664+ gr_set_chroot_entries(current, &current->fs->root);
65665+ if (atomic_dec_return(&fs->users))
65666 new_fs = NULL;
65667 else
65668 new_fs = fs;
65669diff --git a/kernel/futex.c b/kernel/futex.c
65670index e2b0fb9..db818ac 100644
65671--- a/kernel/futex.c
65672+++ b/kernel/futex.c
65673@@ -54,6 +54,7 @@
65674 #include <linux/mount.h>
65675 #include <linux/pagemap.h>
65676 #include <linux/syscalls.h>
65677+#include <linux/ptrace.h>
65678 #include <linux/signal.h>
65679 #include <linux/export.h>
65680 #include <linux/magic.h>
65681@@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
65682 struct page *page, *page_head;
65683 int err, ro = 0;
65684
65685+#ifdef CONFIG_PAX_SEGMEXEC
65686+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
65687+ return -EFAULT;
65688+#endif
65689+
65690 /*
65691 * The futex address must be "naturally" aligned.
65692 */
65693@@ -2711,6 +2717,7 @@ static int __init futex_init(void)
65694 {
65695 u32 curval;
65696 int i;
65697+ mm_segment_t oldfs;
65698
65699 /*
65700 * This will fail and we want it. Some arch implementations do
65701@@ -2722,8 +2729,11 @@ static int __init futex_init(void)
65702 * implementation, the non-functional ones will return
65703 * -ENOSYS.
65704 */
65705+ oldfs = get_fs();
65706+ set_fs(USER_DS);
65707 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
65708 futex_cmpxchg_enabled = 1;
65709+ set_fs(oldfs);
65710
65711 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
65712 plist_head_init(&futex_queues[i].chain);
65713diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
65714index 9b22d03..6295b62 100644
65715--- a/kernel/gcov/base.c
65716+++ b/kernel/gcov/base.c
65717@@ -102,11 +102,6 @@ void gcov_enable_events(void)
65718 }
65719
65720 #ifdef CONFIG_MODULES
65721-static inline int within(void *addr, void *start, unsigned long size)
65722-{
65723- return ((addr >= start) && (addr < start + size));
65724-}
65725-
65726 /* Update list and generate events when modules are unloaded. */
65727 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
65728 void *data)
65729@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
65730 prev = NULL;
65731 /* Remove entries located in module from linked list. */
65732 for (info = gcov_info_head; info; info = info->next) {
65733- if (within(info, mod->module_core, mod->core_size)) {
65734+ if (within_module_core_rw((unsigned long)info, mod)) {
65735 if (prev)
65736 prev->next = info->next;
65737 else
65738diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
65739index ae34bf5..4e2f3d0 100644
65740--- a/kernel/hrtimer.c
65741+++ b/kernel/hrtimer.c
65742@@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
65743 local_irq_restore(flags);
65744 }
65745
65746-static void run_hrtimer_softirq(struct softirq_action *h)
65747+static void run_hrtimer_softirq(void)
65748 {
65749 hrtimer_peek_ahead_timers();
65750 }
65751diff --git a/kernel/jump_label.c b/kernel/jump_label.c
65752index 4304919..408c4c0 100644
65753--- a/kernel/jump_label.c
65754+++ b/kernel/jump_label.c
65755@@ -13,6 +13,7 @@
65756 #include <linux/sort.h>
65757 #include <linux/err.h>
65758 #include <linux/static_key.h>
65759+#include <linux/mm.h>
65760
65761 #ifdef HAVE_JUMP_LABEL
65762
65763@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
65764
65765 size = (((unsigned long)stop - (unsigned long)start)
65766 / sizeof(struct jump_entry));
65767+ pax_open_kernel();
65768 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
65769+ pax_close_kernel();
65770 }
65771
65772 static void jump_label_update(struct static_key *key, int enable);
65773@@ -356,10 +359,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
65774 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
65775 struct jump_entry *iter;
65776
65777+ pax_open_kernel();
65778 for (iter = iter_start; iter < iter_stop; iter++) {
65779 if (within_module_init(iter->code, mod))
65780 iter->code = 0;
65781 }
65782+ pax_close_kernel();
65783 }
65784
65785 static int
65786diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
65787index 079f1d3..a407562 100644
65788--- a/kernel/kallsyms.c
65789+++ b/kernel/kallsyms.c
65790@@ -11,6 +11,9 @@
65791 * Changed the compression method from stem compression to "table lookup"
65792 * compression (see scripts/kallsyms.c for a more complete description)
65793 */
65794+#ifdef CONFIG_GRKERNSEC_HIDESYM
65795+#define __INCLUDED_BY_HIDESYM 1
65796+#endif
65797 #include <linux/kallsyms.h>
65798 #include <linux/module.h>
65799 #include <linux/init.h>
65800@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
65801
65802 static inline int is_kernel_inittext(unsigned long addr)
65803 {
65804+ if (system_state != SYSTEM_BOOTING)
65805+ return 0;
65806+
65807 if (addr >= (unsigned long)_sinittext
65808 && addr <= (unsigned long)_einittext)
65809 return 1;
65810 return 0;
65811 }
65812
65813+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65814+#ifdef CONFIG_MODULES
65815+static inline int is_module_text(unsigned long addr)
65816+{
65817+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
65818+ return 1;
65819+
65820+ addr = ktla_ktva(addr);
65821+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
65822+}
65823+#else
65824+static inline int is_module_text(unsigned long addr)
65825+{
65826+ return 0;
65827+}
65828+#endif
65829+#endif
65830+
65831 static inline int is_kernel_text(unsigned long addr)
65832 {
65833 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
65834@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
65835
65836 static inline int is_kernel(unsigned long addr)
65837 {
65838+
65839+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65840+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
65841+ return 1;
65842+
65843+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
65844+#else
65845 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
65846+#endif
65847+
65848 return 1;
65849 return in_gate_area_no_mm(addr);
65850 }
65851
65852 static int is_ksym_addr(unsigned long addr)
65853 {
65854+
65855+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65856+ if (is_module_text(addr))
65857+ return 0;
65858+#endif
65859+
65860 if (all_var)
65861 return is_kernel(addr);
65862
65863@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
65864
65865 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
65866 {
65867- iter->name[0] = '\0';
65868 iter->nameoff = get_symbol_offset(new_pos);
65869 iter->pos = new_pos;
65870 }
65871@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
65872 {
65873 struct kallsym_iter *iter = m->private;
65874
65875+#ifdef CONFIG_GRKERNSEC_HIDESYM
65876+ if (current_uid())
65877+ return 0;
65878+#endif
65879+
65880 /* Some debugging symbols have no name. Ignore them. */
65881 if (!iter->name[0])
65882 return 0;
65883@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
65884 struct kallsym_iter *iter;
65885 int ret;
65886
65887- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
65888+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
65889 if (!iter)
65890 return -ENOMEM;
65891 reset_iter(iter, 0);
65892diff --git a/kernel/kexec.c b/kernel/kexec.c
65893index 4e2e472..cd0c7ae 100644
65894--- a/kernel/kexec.c
65895+++ b/kernel/kexec.c
65896@@ -1046,7 +1046,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
65897 unsigned long flags)
65898 {
65899 struct compat_kexec_segment in;
65900- struct kexec_segment out, __user *ksegments;
65901+ struct kexec_segment out;
65902+ struct kexec_segment __user *ksegments;
65903 unsigned long i, result;
65904
65905 /* Don't allow clients that don't understand the native
65906diff --git a/kernel/kmod.c b/kernel/kmod.c
65907index 05698a7..a4c1e3a 100644
65908--- a/kernel/kmod.c
65909+++ b/kernel/kmod.c
65910@@ -66,7 +66,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
65911 kfree(info->argv);
65912 }
65913
65914-static int call_modprobe(char *module_name, int wait)
65915+static int call_modprobe(char *module_name, char *module_param, int wait)
65916 {
65917 static char *envp[] = {
65918 "HOME=/",
65919@@ -75,7 +75,7 @@ static int call_modprobe(char *module_name, int wait)
65920 NULL
65921 };
65922
65923- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
65924+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
65925 if (!argv)
65926 goto out;
65927
65928@@ -87,7 +87,8 @@ static int call_modprobe(char *module_name, int wait)
65929 argv[1] = "-q";
65930 argv[2] = "--";
65931 argv[3] = module_name; /* check free_modprobe_argv() */
65932- argv[4] = NULL;
65933+ argv[4] = module_param;
65934+ argv[5] = NULL;
65935
65936 return call_usermodehelper_fns(modprobe_path, argv, envp,
65937 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
65938@@ -112,9 +113,8 @@ out:
65939 * If module auto-loading support is disabled then this function
65940 * becomes a no-operation.
65941 */
65942-int __request_module(bool wait, const char *fmt, ...)
65943+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
65944 {
65945- va_list args;
65946 char module_name[MODULE_NAME_LEN];
65947 unsigned int max_modprobes;
65948 int ret;
65949@@ -122,9 +122,7 @@ int __request_module(bool wait, const char *fmt, ...)
65950 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
65951 static int kmod_loop_msg;
65952
65953- va_start(args, fmt);
65954- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
65955- va_end(args);
65956+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
65957 if (ret >= MODULE_NAME_LEN)
65958 return -ENAMETOOLONG;
65959
65960@@ -132,6 +130,20 @@ int __request_module(bool wait, const char *fmt, ...)
65961 if (ret)
65962 return ret;
65963
65964+#ifdef CONFIG_GRKERNSEC_MODHARDEN
65965+ if (!current_uid()) {
65966+ /* hack to workaround consolekit/udisks stupidity */
65967+ read_lock(&tasklist_lock);
65968+ if (!strcmp(current->comm, "mount") &&
65969+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
65970+ read_unlock(&tasklist_lock);
65971+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
65972+ return -EPERM;
65973+ }
65974+ read_unlock(&tasklist_lock);
65975+ }
65976+#endif
65977+
65978 /* If modprobe needs a service that is in a module, we get a recursive
65979 * loop. Limit the number of running kmod threads to max_threads/2 or
65980 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
65981@@ -160,11 +172,52 @@ int __request_module(bool wait, const char *fmt, ...)
65982
65983 trace_module_request(module_name, wait, _RET_IP_);
65984
65985- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
65986+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
65987
65988 atomic_dec(&kmod_concurrent);
65989 return ret;
65990 }
65991+
65992+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
65993+{
65994+ va_list args;
65995+ int ret;
65996+
65997+ va_start(args, fmt);
65998+ ret = ____request_module(wait, module_param, fmt, args);
65999+ va_end(args);
66000+
66001+ return ret;
66002+}
66003+
66004+int __request_module(bool wait, const char *fmt, ...)
66005+{
66006+ va_list args;
66007+ int ret;
66008+
66009+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66010+ if (current_uid()) {
66011+ char module_param[MODULE_NAME_LEN];
66012+
66013+ memset(module_param, 0, sizeof(module_param));
66014+
66015+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
66016+
66017+ va_start(args, fmt);
66018+ ret = ____request_module(wait, module_param, fmt, args);
66019+ va_end(args);
66020+
66021+ return ret;
66022+ }
66023+#endif
66024+
66025+ va_start(args, fmt);
66026+ ret = ____request_module(wait, NULL, fmt, args);
66027+ va_end(args);
66028+
66029+ return ret;
66030+}
66031+
66032 EXPORT_SYMBOL(__request_module);
66033 #endif /* CONFIG_MODULES */
66034
66035@@ -267,7 +320,7 @@ static int wait_for_helper(void *data)
66036 *
66037 * Thus the __user pointer cast is valid here.
66038 */
66039- sys_wait4(pid, (int __user *)&ret, 0, NULL);
66040+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
66041
66042 /*
66043 * If ret is 0, either ____call_usermodehelper failed and the
66044diff --git a/kernel/kprobes.c b/kernel/kprobes.c
66045index c62b854..cb67968 100644
66046--- a/kernel/kprobes.c
66047+++ b/kernel/kprobes.c
66048@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
66049 * kernel image and loaded module images reside. This is required
66050 * so x86_64 can correctly handle the %rip-relative fixups.
66051 */
66052- kip->insns = module_alloc(PAGE_SIZE);
66053+ kip->insns = module_alloc_exec(PAGE_SIZE);
66054 if (!kip->insns) {
66055 kfree(kip);
66056 return NULL;
66057@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
66058 */
66059 if (!list_is_singular(&kip->list)) {
66060 list_del(&kip->list);
66061- module_free(NULL, kip->insns);
66062+ module_free_exec(NULL, kip->insns);
66063 kfree(kip);
66064 }
66065 return 1;
66066@@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
66067 {
66068 int i, err = 0;
66069 unsigned long offset = 0, size = 0;
66070- char *modname, namebuf[128];
66071+ char *modname, namebuf[KSYM_NAME_LEN];
66072 const char *symbol_name;
66073 void *addr;
66074 struct kprobe_blackpoint *kb;
66075@@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
66076 const char *sym = NULL;
66077 unsigned int i = *(loff_t *) v;
66078 unsigned long offset = 0;
66079- char *modname, namebuf[128];
66080+ char *modname, namebuf[KSYM_NAME_LEN];
66081
66082 head = &kprobe_table[i];
66083 preempt_disable();
66084diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
66085index 4e316e1..5501eef 100644
66086--- a/kernel/ksysfs.c
66087+++ b/kernel/ksysfs.c
66088@@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
66089 {
66090 if (count+1 > UEVENT_HELPER_PATH_LEN)
66091 return -ENOENT;
66092+ if (!capable(CAP_SYS_ADMIN))
66093+ return -EPERM;
66094 memcpy(uevent_helper, buf, count);
66095 uevent_helper[count] = '\0';
66096 if (count && uevent_helper[count-1] == '\n')
66097diff --git a/kernel/lockdep.c b/kernel/lockdep.c
66098index ea9ee45..67ebc8f 100644
66099--- a/kernel/lockdep.c
66100+++ b/kernel/lockdep.c
66101@@ -590,6 +590,10 @@ static int static_obj(void *obj)
66102 end = (unsigned long) &_end,
66103 addr = (unsigned long) obj;
66104
66105+#ifdef CONFIG_PAX_KERNEXEC
66106+ start = ktla_ktva(start);
66107+#endif
66108+
66109 /*
66110 * static variable?
66111 */
66112@@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
66113 if (!static_obj(lock->key)) {
66114 debug_locks_off();
66115 printk("INFO: trying to register non-static key.\n");
66116+ printk("lock:%pS key:%pS.\n", lock, lock->key);
66117 printk("the code is fine but needs lockdep annotation.\n");
66118 printk("turning off the locking correctness validator.\n");
66119 dump_stack();
66120@@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
66121 if (!class)
66122 return 0;
66123 }
66124- atomic_inc((atomic_t *)&class->ops);
66125+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
66126 if (very_verbose(class)) {
66127 printk("\nacquire class [%p] %s", class->key, class->name);
66128 if (class->name_version > 1)
66129diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
66130index 91c32a0..b2c71c5 100644
66131--- a/kernel/lockdep_proc.c
66132+++ b/kernel/lockdep_proc.c
66133@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
66134
66135 static void print_name(struct seq_file *m, struct lock_class *class)
66136 {
66137- char str[128];
66138+ char str[KSYM_NAME_LEN];
66139 const char *name = class->name;
66140
66141 if (!name) {
66142diff --git a/kernel/module.c b/kernel/module.c
66143index 78ac6ec..e87db0e 100644
66144--- a/kernel/module.c
66145+++ b/kernel/module.c
66146@@ -58,6 +58,7 @@
66147 #include <linux/jump_label.h>
66148 #include <linux/pfn.h>
66149 #include <linux/bsearch.h>
66150+#include <linux/grsecurity.h>
66151
66152 #define CREATE_TRACE_POINTS
66153 #include <trace/events/module.h>
66154@@ -114,7 +115,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
66155
66156 /* Bounds of module allocation, for speeding __module_address.
66157 * Protected by module_mutex. */
66158-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
66159+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
66160+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
66161
66162 int register_module_notifier(struct notifier_block * nb)
66163 {
66164@@ -278,7 +280,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66165 return true;
66166
66167 list_for_each_entry_rcu(mod, &modules, list) {
66168- struct symsearch arr[] = {
66169+ struct symsearch modarr[] = {
66170 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
66171 NOT_GPL_ONLY, false },
66172 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
66173@@ -300,7 +302,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66174 #endif
66175 };
66176
66177- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
66178+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
66179 return true;
66180 }
66181 return false;
66182@@ -432,7 +434,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
66183 static int percpu_modalloc(struct module *mod,
66184 unsigned long size, unsigned long align)
66185 {
66186- if (align > PAGE_SIZE) {
66187+ if (align-1 >= PAGE_SIZE) {
66188 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
66189 mod->name, align, PAGE_SIZE);
66190 align = PAGE_SIZE;
66191@@ -1032,7 +1034,7 @@ struct module_attribute module_uevent =
66192 static ssize_t show_coresize(struct module_attribute *mattr,
66193 struct module_kobject *mk, char *buffer)
66194 {
66195- return sprintf(buffer, "%u\n", mk->mod->core_size);
66196+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
66197 }
66198
66199 static struct module_attribute modinfo_coresize =
66200@@ -1041,7 +1043,7 @@ static struct module_attribute modinfo_coresize =
66201 static ssize_t show_initsize(struct module_attribute *mattr,
66202 struct module_kobject *mk, char *buffer)
66203 {
66204- return sprintf(buffer, "%u\n", mk->mod->init_size);
66205+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
66206 }
66207
66208 static struct module_attribute modinfo_initsize =
66209@@ -1255,7 +1257,7 @@ resolve_symbol_wait(struct module *mod,
66210 */
66211 #ifdef CONFIG_SYSFS
66212
66213-#ifdef CONFIG_KALLSYMS
66214+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66215 static inline bool sect_empty(const Elf_Shdr *sect)
66216 {
66217 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
66218@@ -1721,21 +1723,21 @@ static void set_section_ro_nx(void *base,
66219
66220 static void unset_module_core_ro_nx(struct module *mod)
66221 {
66222- set_page_attributes(mod->module_core + mod->core_text_size,
66223- mod->module_core + mod->core_size,
66224+ set_page_attributes(mod->module_core_rw,
66225+ mod->module_core_rw + mod->core_size_rw,
66226 set_memory_x);
66227- set_page_attributes(mod->module_core,
66228- mod->module_core + mod->core_ro_size,
66229+ set_page_attributes(mod->module_core_rx,
66230+ mod->module_core_rx + mod->core_size_rx,
66231 set_memory_rw);
66232 }
66233
66234 static void unset_module_init_ro_nx(struct module *mod)
66235 {
66236- set_page_attributes(mod->module_init + mod->init_text_size,
66237- mod->module_init + mod->init_size,
66238+ set_page_attributes(mod->module_init_rw,
66239+ mod->module_init_rw + mod->init_size_rw,
66240 set_memory_x);
66241- set_page_attributes(mod->module_init,
66242- mod->module_init + mod->init_ro_size,
66243+ set_page_attributes(mod->module_init_rx,
66244+ mod->module_init_rx + mod->init_size_rx,
66245 set_memory_rw);
66246 }
66247
66248@@ -1746,14 +1748,14 @@ void set_all_modules_text_rw(void)
66249
66250 mutex_lock(&module_mutex);
66251 list_for_each_entry_rcu(mod, &modules, list) {
66252- if ((mod->module_core) && (mod->core_text_size)) {
66253- set_page_attributes(mod->module_core,
66254- mod->module_core + mod->core_text_size,
66255+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
66256+ set_page_attributes(mod->module_core_rx,
66257+ mod->module_core_rx + mod->core_size_rx,
66258 set_memory_rw);
66259 }
66260- if ((mod->module_init) && (mod->init_text_size)) {
66261- set_page_attributes(mod->module_init,
66262- mod->module_init + mod->init_text_size,
66263+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
66264+ set_page_attributes(mod->module_init_rx,
66265+ mod->module_init_rx + mod->init_size_rx,
66266 set_memory_rw);
66267 }
66268 }
66269@@ -1767,14 +1769,14 @@ void set_all_modules_text_ro(void)
66270
66271 mutex_lock(&module_mutex);
66272 list_for_each_entry_rcu(mod, &modules, list) {
66273- if ((mod->module_core) && (mod->core_text_size)) {
66274- set_page_attributes(mod->module_core,
66275- mod->module_core + mod->core_text_size,
66276+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
66277+ set_page_attributes(mod->module_core_rx,
66278+ mod->module_core_rx + mod->core_size_rx,
66279 set_memory_ro);
66280 }
66281- if ((mod->module_init) && (mod->init_text_size)) {
66282- set_page_attributes(mod->module_init,
66283- mod->module_init + mod->init_text_size,
66284+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
66285+ set_page_attributes(mod->module_init_rx,
66286+ mod->module_init_rx + mod->init_size_rx,
66287 set_memory_ro);
66288 }
66289 }
66290@@ -1820,16 +1822,19 @@ static void free_module(struct module *mod)
66291
66292 /* This may be NULL, but that's OK */
66293 unset_module_init_ro_nx(mod);
66294- module_free(mod, mod->module_init);
66295+ module_free(mod, mod->module_init_rw);
66296+ module_free_exec(mod, mod->module_init_rx);
66297 kfree(mod->args);
66298 percpu_modfree(mod);
66299
66300 /* Free lock-classes: */
66301- lockdep_free_key_range(mod->module_core, mod->core_size);
66302+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
66303+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
66304
66305 /* Finally, free the core (containing the module structure) */
66306 unset_module_core_ro_nx(mod);
66307- module_free(mod, mod->module_core);
66308+ module_free_exec(mod, mod->module_core_rx);
66309+ module_free(mod, mod->module_core_rw);
66310
66311 #ifdef CONFIG_MPU
66312 update_protections(current->mm);
66313@@ -1899,9 +1904,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66314 int ret = 0;
66315 const struct kernel_symbol *ksym;
66316
66317+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66318+ int is_fs_load = 0;
66319+ int register_filesystem_found = 0;
66320+ char *p;
66321+
66322+ p = strstr(mod->args, "grsec_modharden_fs");
66323+ if (p) {
66324+ char *endptr = p + strlen("grsec_modharden_fs");
66325+ /* copy \0 as well */
66326+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
66327+ is_fs_load = 1;
66328+ }
66329+#endif
66330+
66331 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
66332 const char *name = info->strtab + sym[i].st_name;
66333
66334+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66335+ /* it's a real shame this will never get ripped and copied
66336+ upstream! ;(
66337+ */
66338+ if (is_fs_load && !strcmp(name, "register_filesystem"))
66339+ register_filesystem_found = 1;
66340+#endif
66341+
66342 switch (sym[i].st_shndx) {
66343 case SHN_COMMON:
66344 /* We compiled with -fno-common. These are not
66345@@ -1922,7 +1949,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66346 ksym = resolve_symbol_wait(mod, info, name);
66347 /* Ok if resolved. */
66348 if (ksym && !IS_ERR(ksym)) {
66349+ pax_open_kernel();
66350 sym[i].st_value = ksym->value;
66351+ pax_close_kernel();
66352 break;
66353 }
66354
66355@@ -1941,11 +1970,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66356 secbase = (unsigned long)mod_percpu(mod);
66357 else
66358 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
66359+ pax_open_kernel();
66360 sym[i].st_value += secbase;
66361+ pax_close_kernel();
66362 break;
66363 }
66364 }
66365
66366+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66367+ if (is_fs_load && !register_filesystem_found) {
66368+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
66369+ ret = -EPERM;
66370+ }
66371+#endif
66372+
66373 return ret;
66374 }
66375
66376@@ -2049,22 +2087,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
66377 || s->sh_entsize != ~0UL
66378 || strstarts(sname, ".init"))
66379 continue;
66380- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
66381+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66382+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
66383+ else
66384+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
66385 pr_debug("\t%s\n", sname);
66386 }
66387- switch (m) {
66388- case 0: /* executable */
66389- mod->core_size = debug_align(mod->core_size);
66390- mod->core_text_size = mod->core_size;
66391- break;
66392- case 1: /* RO: text and ro-data */
66393- mod->core_size = debug_align(mod->core_size);
66394- mod->core_ro_size = mod->core_size;
66395- break;
66396- case 3: /* whole core */
66397- mod->core_size = debug_align(mod->core_size);
66398- break;
66399- }
66400 }
66401
66402 pr_debug("Init section allocation order:\n");
66403@@ -2078,23 +2106,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
66404 || s->sh_entsize != ~0UL
66405 || !strstarts(sname, ".init"))
66406 continue;
66407- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
66408- | INIT_OFFSET_MASK);
66409+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66410+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
66411+ else
66412+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
66413+ s->sh_entsize |= INIT_OFFSET_MASK;
66414 pr_debug("\t%s\n", sname);
66415 }
66416- switch (m) {
66417- case 0: /* executable */
66418- mod->init_size = debug_align(mod->init_size);
66419- mod->init_text_size = mod->init_size;
66420- break;
66421- case 1: /* RO: text and ro-data */
66422- mod->init_size = debug_align(mod->init_size);
66423- mod->init_ro_size = mod->init_size;
66424- break;
66425- case 3: /* whole init */
66426- mod->init_size = debug_align(mod->init_size);
66427- break;
66428- }
66429 }
66430 }
66431
66432@@ -2266,7 +2284,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66433
66434 /* Put symbol section at end of init part of module. */
66435 symsect->sh_flags |= SHF_ALLOC;
66436- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
66437+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
66438 info->index.sym) | INIT_OFFSET_MASK;
66439 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
66440
66441@@ -2281,13 +2299,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66442 }
66443
66444 /* Append room for core symbols at end of core part. */
66445- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
66446- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
66447- mod->core_size += strtab_size;
66448+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
66449+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
66450+ mod->core_size_rx += strtab_size;
66451
66452 /* Put string table section at end of init part of module. */
66453 strsect->sh_flags |= SHF_ALLOC;
66454- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
66455+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
66456 info->index.str) | INIT_OFFSET_MASK;
66457 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
66458 }
66459@@ -2305,12 +2323,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66460 /* Make sure we get permanent strtab: don't use info->strtab. */
66461 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
66462
66463+ pax_open_kernel();
66464+
66465 /* Set types up while we still have access to sections. */
66466 for (i = 0; i < mod->num_symtab; i++)
66467 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
66468
66469- mod->core_symtab = dst = mod->module_core + info->symoffs;
66470- mod->core_strtab = s = mod->module_core + info->stroffs;
66471+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
66472+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
66473 src = mod->symtab;
66474 *dst = *src;
66475 *s++ = 0;
66476@@ -2323,6 +2343,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66477 s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
66478 }
66479 mod->core_num_syms = ndst;
66480+
66481+ pax_close_kernel();
66482 }
66483 #else
66484 static inline void layout_symtab(struct module *mod, struct load_info *info)
66485@@ -2356,17 +2378,33 @@ void * __weak module_alloc(unsigned long size)
66486 return size == 0 ? NULL : vmalloc_exec(size);
66487 }
66488
66489-static void *module_alloc_update_bounds(unsigned long size)
66490+static void *module_alloc_update_bounds_rw(unsigned long size)
66491 {
66492 void *ret = module_alloc(size);
66493
66494 if (ret) {
66495 mutex_lock(&module_mutex);
66496 /* Update module bounds. */
66497- if ((unsigned long)ret < module_addr_min)
66498- module_addr_min = (unsigned long)ret;
66499- if ((unsigned long)ret + size > module_addr_max)
66500- module_addr_max = (unsigned long)ret + size;
66501+ if ((unsigned long)ret < module_addr_min_rw)
66502+ module_addr_min_rw = (unsigned long)ret;
66503+ if ((unsigned long)ret + size > module_addr_max_rw)
66504+ module_addr_max_rw = (unsigned long)ret + size;
66505+ mutex_unlock(&module_mutex);
66506+ }
66507+ return ret;
66508+}
66509+
66510+static void *module_alloc_update_bounds_rx(unsigned long size)
66511+{
66512+ void *ret = module_alloc_exec(size);
66513+
66514+ if (ret) {
66515+ mutex_lock(&module_mutex);
66516+ /* Update module bounds. */
66517+ if ((unsigned long)ret < module_addr_min_rx)
66518+ module_addr_min_rx = (unsigned long)ret;
66519+ if ((unsigned long)ret + size > module_addr_max_rx)
66520+ module_addr_max_rx = (unsigned long)ret + size;
66521 mutex_unlock(&module_mutex);
66522 }
66523 return ret;
66524@@ -2543,8 +2581,14 @@ static struct module *setup_load_info(struct load_info *info)
66525 static int check_modinfo(struct module *mod, struct load_info *info)
66526 {
66527 const char *modmagic = get_modinfo(info, "vermagic");
66528+ const char *license = get_modinfo(info, "license");
66529 int err;
66530
66531+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
66532+ if (!license || !license_is_gpl_compatible(license))
66533+ return -ENOEXEC;
66534+#endif
66535+
66536 /* This is allowed: modprobe --force will invalidate it. */
66537 if (!modmagic) {
66538 err = try_to_force_load(mod, "bad vermagic");
66539@@ -2567,7 +2611,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
66540 }
66541
66542 /* Set up license info based on the info section */
66543- set_license(mod, get_modinfo(info, "license"));
66544+ set_license(mod, license);
66545
66546 return 0;
66547 }
66548@@ -2661,7 +2705,7 @@ static int move_module(struct module *mod, struct load_info *info)
66549 void *ptr;
66550
66551 /* Do the allocs. */
66552- ptr = module_alloc_update_bounds(mod->core_size);
66553+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
66554 /*
66555 * The pointer to this block is stored in the module structure
66556 * which is inside the block. Just mark it as not being a
66557@@ -2671,23 +2715,50 @@ static int move_module(struct module *mod, struct load_info *info)
66558 if (!ptr)
66559 return -ENOMEM;
66560
66561- memset(ptr, 0, mod->core_size);
66562- mod->module_core = ptr;
66563+ memset(ptr, 0, mod->core_size_rw);
66564+ mod->module_core_rw = ptr;
66565
66566- ptr = module_alloc_update_bounds(mod->init_size);
66567+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
66568 /*
66569 * The pointer to this block is stored in the module structure
66570 * which is inside the block. This block doesn't need to be
66571 * scanned as it contains data and code that will be freed
66572 * after the module is initialized.
66573 */
66574- kmemleak_ignore(ptr);
66575- if (!ptr && mod->init_size) {
66576- module_free(mod, mod->module_core);
66577+ kmemleak_not_leak(ptr);
66578+ if (!ptr && mod->init_size_rw) {
66579+ module_free(mod, mod->module_core_rw);
66580 return -ENOMEM;
66581 }
66582- memset(ptr, 0, mod->init_size);
66583- mod->module_init = ptr;
66584+ memset(ptr, 0, mod->init_size_rw);
66585+ mod->module_init_rw = ptr;
66586+
66587+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
66588+ kmemleak_not_leak(ptr);
66589+ if (!ptr) {
66590+ module_free(mod, mod->module_init_rw);
66591+ module_free(mod, mod->module_core_rw);
66592+ return -ENOMEM;
66593+ }
66594+
66595+ pax_open_kernel();
66596+ memset(ptr, 0, mod->core_size_rx);
66597+ pax_close_kernel();
66598+ mod->module_core_rx = ptr;
66599+
66600+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
66601+ kmemleak_not_leak(ptr);
66602+ if (!ptr && mod->init_size_rx) {
66603+ module_free_exec(mod, mod->module_core_rx);
66604+ module_free(mod, mod->module_init_rw);
66605+ module_free(mod, mod->module_core_rw);
66606+ return -ENOMEM;
66607+ }
66608+
66609+ pax_open_kernel();
66610+ memset(ptr, 0, mod->init_size_rx);
66611+ pax_close_kernel();
66612+ mod->module_init_rx = ptr;
66613
66614 /* Transfer each section which specifies SHF_ALLOC */
66615 pr_debug("final section addresses:\n");
66616@@ -2698,16 +2769,45 @@ static int move_module(struct module *mod, struct load_info *info)
66617 if (!(shdr->sh_flags & SHF_ALLOC))
66618 continue;
66619
66620- if (shdr->sh_entsize & INIT_OFFSET_MASK)
66621- dest = mod->module_init
66622- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66623- else
66624- dest = mod->module_core + shdr->sh_entsize;
66625+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
66626+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66627+ dest = mod->module_init_rw
66628+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66629+ else
66630+ dest = mod->module_init_rx
66631+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66632+ } else {
66633+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66634+ dest = mod->module_core_rw + shdr->sh_entsize;
66635+ else
66636+ dest = mod->module_core_rx + shdr->sh_entsize;
66637+ }
66638+
66639+ if (shdr->sh_type != SHT_NOBITS) {
66640+
66641+#ifdef CONFIG_PAX_KERNEXEC
66642+#ifdef CONFIG_X86_64
66643+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
66644+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
66645+#endif
66646+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
66647+ pax_open_kernel();
66648+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66649+ pax_close_kernel();
66650+ } else
66651+#endif
66652
66653- if (shdr->sh_type != SHT_NOBITS)
66654 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66655+ }
66656 /* Update sh_addr to point to copy in image. */
66657- shdr->sh_addr = (unsigned long)dest;
66658+
66659+#ifdef CONFIG_PAX_KERNEXEC
66660+ if (shdr->sh_flags & SHF_EXECINSTR)
66661+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
66662+ else
66663+#endif
66664+
66665+ shdr->sh_addr = (unsigned long)dest;
66666 pr_debug("\t0x%lx %s\n",
66667 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
66668 }
66669@@ -2758,12 +2858,12 @@ static void flush_module_icache(const struct module *mod)
66670 * Do it before processing of module parameters, so the module
66671 * can provide parameter accessor functions of its own.
66672 */
66673- if (mod->module_init)
66674- flush_icache_range((unsigned long)mod->module_init,
66675- (unsigned long)mod->module_init
66676- + mod->init_size);
66677- flush_icache_range((unsigned long)mod->module_core,
66678- (unsigned long)mod->module_core + mod->core_size);
66679+ if (mod->module_init_rx)
66680+ flush_icache_range((unsigned long)mod->module_init_rx,
66681+ (unsigned long)mod->module_init_rx
66682+ + mod->init_size_rx);
66683+ flush_icache_range((unsigned long)mod->module_core_rx,
66684+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
66685
66686 set_fs(old_fs);
66687 }
66688@@ -2833,8 +2933,10 @@ out:
66689 static void module_deallocate(struct module *mod, struct load_info *info)
66690 {
66691 percpu_modfree(mod);
66692- module_free(mod, mod->module_init);
66693- module_free(mod, mod->module_core);
66694+ module_free_exec(mod, mod->module_init_rx);
66695+ module_free_exec(mod, mod->module_core_rx);
66696+ module_free(mod, mod->module_init_rw);
66697+ module_free(mod, mod->module_core_rw);
66698 }
66699
66700 int __weak module_finalize(const Elf_Ehdr *hdr,
66701@@ -2898,9 +3000,38 @@ static struct module *load_module(void __user *umod,
66702 if (err)
66703 goto free_unload;
66704
66705+ /* Now copy in args */
66706+ mod->args = strndup_user(uargs, ~0UL >> 1);
66707+ if (IS_ERR(mod->args)) {
66708+ err = PTR_ERR(mod->args);
66709+ goto free_unload;
66710+ }
66711+
66712 /* Set up MODINFO_ATTR fields */
66713 setup_modinfo(mod, &info);
66714
66715+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66716+ {
66717+ char *p, *p2;
66718+
66719+ if (strstr(mod->args, "grsec_modharden_netdev")) {
66720+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
66721+ err = -EPERM;
66722+ goto free_modinfo;
66723+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
66724+ p += strlen("grsec_modharden_normal");
66725+ p2 = strstr(p, "_");
66726+ if (p2) {
66727+ *p2 = '\0';
66728+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
66729+ *p2 = '_';
66730+ }
66731+ err = -EPERM;
66732+ goto free_modinfo;
66733+ }
66734+ }
66735+#endif
66736+
66737 /* Fix up syms, so that st_value is a pointer to location. */
66738 err = simplify_symbols(mod, &info);
66739 if (err < 0)
66740@@ -2916,13 +3047,6 @@ static struct module *load_module(void __user *umod,
66741
66742 flush_module_icache(mod);
66743
66744- /* Now copy in args */
66745- mod->args = strndup_user(uargs, ~0UL >> 1);
66746- if (IS_ERR(mod->args)) {
66747- err = PTR_ERR(mod->args);
66748- goto free_arch_cleanup;
66749- }
66750-
66751 /* Mark state as coming so strong_try_module_get() ignores us. */
66752 mod->state = MODULE_STATE_COMING;
66753
66754@@ -2980,11 +3104,10 @@ static struct module *load_module(void __user *umod,
66755 unlock:
66756 mutex_unlock(&module_mutex);
66757 synchronize_sched();
66758- kfree(mod->args);
66759- free_arch_cleanup:
66760 module_arch_cleanup(mod);
66761 free_modinfo:
66762 free_modinfo(mod);
66763+ kfree(mod->args);
66764 free_unload:
66765 module_unload_free(mod);
66766 free_module:
66767@@ -3025,16 +3148,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
66768 MODULE_STATE_COMING, mod);
66769
66770 /* Set RO and NX regions for core */
66771- set_section_ro_nx(mod->module_core,
66772- mod->core_text_size,
66773- mod->core_ro_size,
66774- mod->core_size);
66775+ set_section_ro_nx(mod->module_core_rx,
66776+ mod->core_size_rx,
66777+ mod->core_size_rx,
66778+ mod->core_size_rx);
66779
66780 /* Set RO and NX regions for init */
66781- set_section_ro_nx(mod->module_init,
66782- mod->init_text_size,
66783- mod->init_ro_size,
66784- mod->init_size);
66785+ set_section_ro_nx(mod->module_init_rx,
66786+ mod->init_size_rx,
66787+ mod->init_size_rx,
66788+ mod->init_size_rx);
66789
66790 do_mod_ctors(mod);
66791 /* Start the module */
66792@@ -3080,11 +3203,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
66793 mod->strtab = mod->core_strtab;
66794 #endif
66795 unset_module_init_ro_nx(mod);
66796- module_free(mod, mod->module_init);
66797- mod->module_init = NULL;
66798- mod->init_size = 0;
66799- mod->init_ro_size = 0;
66800- mod->init_text_size = 0;
66801+ module_free(mod, mod->module_init_rw);
66802+ module_free_exec(mod, mod->module_init_rx);
66803+ mod->module_init_rw = NULL;
66804+ mod->module_init_rx = NULL;
66805+ mod->init_size_rw = 0;
66806+ mod->init_size_rx = 0;
66807 mutex_unlock(&module_mutex);
66808
66809 return 0;
66810@@ -3115,10 +3239,16 @@ static const char *get_ksymbol(struct module *mod,
66811 unsigned long nextval;
66812
66813 /* At worse, next value is at end of module */
66814- if (within_module_init(addr, mod))
66815- nextval = (unsigned long)mod->module_init+mod->init_text_size;
66816+ if (within_module_init_rx(addr, mod))
66817+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
66818+ else if (within_module_init_rw(addr, mod))
66819+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
66820+ else if (within_module_core_rx(addr, mod))
66821+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
66822+ else if (within_module_core_rw(addr, mod))
66823+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
66824 else
66825- nextval = (unsigned long)mod->module_core+mod->core_text_size;
66826+ return NULL;
66827
66828 /* Scan for closest preceding symbol, and next symbol. (ELF
66829 starts real symbols at 1). */
66830@@ -3353,7 +3483,7 @@ static int m_show(struct seq_file *m, void *p)
66831 char buf[8];
66832
66833 seq_printf(m, "%s %u",
66834- mod->name, mod->init_size + mod->core_size);
66835+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
66836 print_unload_info(m, mod);
66837
66838 /* Informative for users. */
66839@@ -3362,7 +3492,7 @@ static int m_show(struct seq_file *m, void *p)
66840 mod->state == MODULE_STATE_COMING ? "Loading":
66841 "Live");
66842 /* Used by oprofile and other similar tools. */
66843- seq_printf(m, " 0x%pK", mod->module_core);
66844+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
66845
66846 /* Taints info */
66847 if (mod->taints)
66848@@ -3398,7 +3528,17 @@ static const struct file_operations proc_modules_operations = {
66849
66850 static int __init proc_modules_init(void)
66851 {
66852+#ifndef CONFIG_GRKERNSEC_HIDESYM
66853+#ifdef CONFIG_GRKERNSEC_PROC_USER
66854+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
66855+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66856+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
66857+#else
66858 proc_create("modules", 0, NULL, &proc_modules_operations);
66859+#endif
66860+#else
66861+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
66862+#endif
66863 return 0;
66864 }
66865 module_init(proc_modules_init);
66866@@ -3457,12 +3597,12 @@ struct module *__module_address(unsigned long addr)
66867 {
66868 struct module *mod;
66869
66870- if (addr < module_addr_min || addr > module_addr_max)
66871+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
66872+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
66873 return NULL;
66874
66875 list_for_each_entry_rcu(mod, &modules, list)
66876- if (within_module_core(addr, mod)
66877- || within_module_init(addr, mod))
66878+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
66879 return mod;
66880 return NULL;
66881 }
66882@@ -3496,11 +3636,20 @@ bool is_module_text_address(unsigned long addr)
66883 */
66884 struct module *__module_text_address(unsigned long addr)
66885 {
66886- struct module *mod = __module_address(addr);
66887+ struct module *mod;
66888+
66889+#ifdef CONFIG_X86_32
66890+ addr = ktla_ktva(addr);
66891+#endif
66892+
66893+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
66894+ return NULL;
66895+
66896+ mod = __module_address(addr);
66897+
66898 if (mod) {
66899 /* Make sure it's within the text section. */
66900- if (!within(addr, mod->module_init, mod->init_text_size)
66901- && !within(addr, mod->module_core, mod->core_text_size))
66902+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
66903 mod = NULL;
66904 }
66905 return mod;
66906diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
66907index 7e3443f..b2a1e6b 100644
66908--- a/kernel/mutex-debug.c
66909+++ b/kernel/mutex-debug.c
66910@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
66911 }
66912
66913 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66914- struct thread_info *ti)
66915+ struct task_struct *task)
66916 {
66917 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
66918
66919 /* Mark the current thread as blocked on the lock: */
66920- ti->task->blocked_on = waiter;
66921+ task->blocked_on = waiter;
66922 }
66923
66924 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66925- struct thread_info *ti)
66926+ struct task_struct *task)
66927 {
66928 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
66929- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
66930- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
66931- ti->task->blocked_on = NULL;
66932+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
66933+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
66934+ task->blocked_on = NULL;
66935
66936 list_del_init(&waiter->list);
66937 waiter->task = NULL;
66938diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
66939index 0799fd3..d06ae3b 100644
66940--- a/kernel/mutex-debug.h
66941+++ b/kernel/mutex-debug.h
66942@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
66943 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
66944 extern void debug_mutex_add_waiter(struct mutex *lock,
66945 struct mutex_waiter *waiter,
66946- struct thread_info *ti);
66947+ struct task_struct *task);
66948 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66949- struct thread_info *ti);
66950+ struct task_struct *task);
66951 extern void debug_mutex_unlock(struct mutex *lock);
66952 extern void debug_mutex_init(struct mutex *lock, const char *name,
66953 struct lock_class_key *key);
66954diff --git a/kernel/mutex.c b/kernel/mutex.c
66955index a307cc9..27fd2e9 100644
66956--- a/kernel/mutex.c
66957+++ b/kernel/mutex.c
66958@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66959 spin_lock_mutex(&lock->wait_lock, flags);
66960
66961 debug_mutex_lock_common(lock, &waiter);
66962- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
66963+ debug_mutex_add_waiter(lock, &waiter, task);
66964
66965 /* add waiting tasks to the end of the waitqueue (FIFO): */
66966 list_add_tail(&waiter.list, &lock->wait_list);
66967@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66968 * TASK_UNINTERRUPTIBLE case.)
66969 */
66970 if (unlikely(signal_pending_state(state, task))) {
66971- mutex_remove_waiter(lock, &waiter,
66972- task_thread_info(task));
66973+ mutex_remove_waiter(lock, &waiter, task);
66974 mutex_release(&lock->dep_map, 1, ip);
66975 spin_unlock_mutex(&lock->wait_lock, flags);
66976
66977@@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66978 done:
66979 lock_acquired(&lock->dep_map, ip);
66980 /* got the lock - rejoice! */
66981- mutex_remove_waiter(lock, &waiter, current_thread_info());
66982+ mutex_remove_waiter(lock, &waiter, task);
66983 mutex_set_owner(lock);
66984
66985 /* set it to 0 if there are no waiters left: */
66986diff --git a/kernel/panic.c b/kernel/panic.c
66987index 8ed89a1..e83856a 100644
66988--- a/kernel/panic.c
66989+++ b/kernel/panic.c
66990@@ -402,7 +402,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
66991 const char *board;
66992
66993 printk(KERN_WARNING "------------[ cut here ]------------\n");
66994- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
66995+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
66996 board = dmi_get_system_info(DMI_PRODUCT_NAME);
66997 if (board)
66998 printk(KERN_WARNING "Hardware name: %s\n", board);
66999@@ -457,7 +457,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
67000 */
67001 void __stack_chk_fail(void)
67002 {
67003- panic("stack-protector: Kernel stack is corrupted in: %p\n",
67004+ dump_stack();
67005+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
67006 __builtin_return_address(0));
67007 }
67008 EXPORT_SYMBOL(__stack_chk_fail);
67009diff --git a/kernel/pid.c b/kernel/pid.c
67010index 9f08dfa..6765c40 100644
67011--- a/kernel/pid.c
67012+++ b/kernel/pid.c
67013@@ -33,6 +33,7 @@
67014 #include <linux/rculist.h>
67015 #include <linux/bootmem.h>
67016 #include <linux/hash.h>
67017+#include <linux/security.h>
67018 #include <linux/pid_namespace.h>
67019 #include <linux/init_task.h>
67020 #include <linux/syscalls.h>
67021@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
67022
67023 int pid_max = PID_MAX_DEFAULT;
67024
67025-#define RESERVED_PIDS 300
67026+#define RESERVED_PIDS 500
67027
67028 int pid_max_min = RESERVED_PIDS + 1;
67029 int pid_max_max = PID_MAX_LIMIT;
67030@@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
67031 */
67032 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
67033 {
67034+ struct task_struct *task;
67035+
67036 rcu_lockdep_assert(rcu_read_lock_held(),
67037 "find_task_by_pid_ns() needs rcu_read_lock()"
67038 " protection");
67039- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
67040+
67041+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
67042+
67043+ if (gr_pid_is_chrooted(task))
67044+ return NULL;
67045+
67046+ return task;
67047 }
67048
67049 struct task_struct *find_task_by_vpid(pid_t vnr)
67050@@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
67051 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
67052 }
67053
67054+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
67055+{
67056+ rcu_lockdep_assert(rcu_read_lock_held(),
67057+ "find_task_by_pid_ns() needs rcu_read_lock()"
67058+ " protection");
67059+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
67060+}
67061+
67062 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
67063 {
67064 struct pid *pid;
67065diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
67066index 125cb67..a4d1c30 100644
67067--- a/kernel/posix-cpu-timers.c
67068+++ b/kernel/posix-cpu-timers.c
67069@@ -6,6 +6,7 @@
67070 #include <linux/posix-timers.h>
67071 #include <linux/errno.h>
67072 #include <linux/math64.h>
67073+#include <linux/security.h>
67074 #include <asm/uaccess.h>
67075 #include <linux/kernel_stat.h>
67076 #include <trace/events/timer.h>
67077@@ -1578,14 +1579,14 @@ struct k_clock clock_posix_cpu = {
67078
67079 static __init int init_posix_cpu_timers(void)
67080 {
67081- struct k_clock process = {
67082+ static struct k_clock process = {
67083 .clock_getres = process_cpu_clock_getres,
67084 .clock_get = process_cpu_clock_get,
67085 .timer_create = process_cpu_timer_create,
67086 .nsleep = process_cpu_nsleep,
67087 .nsleep_restart = process_cpu_nsleep_restart,
67088 };
67089- struct k_clock thread = {
67090+ static struct k_clock thread = {
67091 .clock_getres = thread_cpu_clock_getres,
67092 .clock_get = thread_cpu_clock_get,
67093 .timer_create = thread_cpu_timer_create,
67094diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
67095index 69185ae..cc2847a 100644
67096--- a/kernel/posix-timers.c
67097+++ b/kernel/posix-timers.c
67098@@ -43,6 +43,7 @@
67099 #include <linux/idr.h>
67100 #include <linux/posix-clock.h>
67101 #include <linux/posix-timers.h>
67102+#include <linux/grsecurity.h>
67103 #include <linux/syscalls.h>
67104 #include <linux/wait.h>
67105 #include <linux/workqueue.h>
67106@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
67107 * which we beg off on and pass to do_sys_settimeofday().
67108 */
67109
67110-static struct k_clock posix_clocks[MAX_CLOCKS];
67111+static struct k_clock *posix_clocks[MAX_CLOCKS];
67112
67113 /*
67114 * These ones are defined below.
67115@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
67116 */
67117 static __init int init_posix_timers(void)
67118 {
67119- struct k_clock clock_realtime = {
67120+ static struct k_clock clock_realtime = {
67121 .clock_getres = hrtimer_get_res,
67122 .clock_get = posix_clock_realtime_get,
67123 .clock_set = posix_clock_realtime_set,
67124@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
67125 .timer_get = common_timer_get,
67126 .timer_del = common_timer_del,
67127 };
67128- struct k_clock clock_monotonic = {
67129+ static struct k_clock clock_monotonic = {
67130 .clock_getres = hrtimer_get_res,
67131 .clock_get = posix_ktime_get_ts,
67132 .nsleep = common_nsleep,
67133@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
67134 .timer_get = common_timer_get,
67135 .timer_del = common_timer_del,
67136 };
67137- struct k_clock clock_monotonic_raw = {
67138+ static struct k_clock clock_monotonic_raw = {
67139 .clock_getres = hrtimer_get_res,
67140 .clock_get = posix_get_monotonic_raw,
67141 };
67142- struct k_clock clock_realtime_coarse = {
67143+ static struct k_clock clock_realtime_coarse = {
67144 .clock_getres = posix_get_coarse_res,
67145 .clock_get = posix_get_realtime_coarse,
67146 };
67147- struct k_clock clock_monotonic_coarse = {
67148+ static struct k_clock clock_monotonic_coarse = {
67149 .clock_getres = posix_get_coarse_res,
67150 .clock_get = posix_get_monotonic_coarse,
67151 };
67152- struct k_clock clock_boottime = {
67153+ static struct k_clock clock_boottime = {
67154 .clock_getres = hrtimer_get_res,
67155 .clock_get = posix_get_boottime,
67156 .nsleep = common_nsleep,
67157@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
67158 return;
67159 }
67160
67161- posix_clocks[clock_id] = *new_clock;
67162+ posix_clocks[clock_id] = new_clock;
67163 }
67164 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
67165
67166@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
67167 return (id & CLOCKFD_MASK) == CLOCKFD ?
67168 &clock_posix_dynamic : &clock_posix_cpu;
67169
67170- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
67171+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
67172 return NULL;
67173- return &posix_clocks[id];
67174+ return posix_clocks[id];
67175 }
67176
67177 static int common_timer_create(struct k_itimer *new_timer)
67178@@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
67179 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
67180 return -EFAULT;
67181
67182+ /* only the CLOCK_REALTIME clock can be set, all other clocks
67183+ have their clock_set fptr set to a nosettime dummy function
67184+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
67185+ call common_clock_set, which calls do_sys_settimeofday, which
67186+ we hook
67187+ */
67188+
67189 return kc->clock_set(which_clock, &new_tp);
67190 }
67191
67192diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
67193index d523593..68197a4 100644
67194--- a/kernel/power/poweroff.c
67195+++ b/kernel/power/poweroff.c
67196@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
67197 .enable_mask = SYSRQ_ENABLE_BOOT,
67198 };
67199
67200-static int pm_sysrq_init(void)
67201+static int __init pm_sysrq_init(void)
67202 {
67203 register_sysrq_key('o', &sysrq_poweroff_op);
67204 return 0;
67205diff --git a/kernel/power/process.c b/kernel/power/process.c
67206index 19db29f..33b52b6 100644
67207--- a/kernel/power/process.c
67208+++ b/kernel/power/process.c
67209@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
67210 u64 elapsed_csecs64;
67211 unsigned int elapsed_csecs;
67212 bool wakeup = false;
67213+ bool timedout = false;
67214
67215 do_gettimeofday(&start);
67216
67217@@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user_only)
67218
67219 while (true) {
67220 todo = 0;
67221+ if (time_after(jiffies, end_time))
67222+ timedout = true;
67223 read_lock(&tasklist_lock);
67224 do_each_thread(g, p) {
67225 if (p == current || !freeze_task(p))
67226@@ -58,9 +61,13 @@ static int try_to_freeze_tasks(bool user_only)
67227 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
67228 * transition can't race with task state testing here.
67229 */
67230- if (!task_is_stopped_or_traced(p) &&
67231- !freezer_should_skip(p))
67232+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
67233 todo++;
67234+ if (timedout) {
67235+ printk(KERN_ERR "Task refusing to freeze:\n");
67236+ sched_show_task(p);
67237+ }
67238+ }
67239 } while_each_thread(g, p);
67240 read_unlock(&tasklist_lock);
67241
67242@@ -69,7 +76,7 @@ static int try_to_freeze_tasks(bool user_only)
67243 todo += wq_busy;
67244 }
67245
67246- if (!todo || time_after(jiffies, end_time))
67247+ if (!todo || timedout)
67248 break;
67249
67250 if (pm_wakeup_pending()) {
67251diff --git a/kernel/printk.c b/kernel/printk.c
67252index b663c2c..1d6ba7a 100644
67253--- a/kernel/printk.c
67254+++ b/kernel/printk.c
67255@@ -316,6 +316,11 @@ static int check_syslog_permissions(int type, bool from_file)
67256 if (from_file && type != SYSLOG_ACTION_OPEN)
67257 return 0;
67258
67259+#ifdef CONFIG_GRKERNSEC_DMESG
67260+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
67261+ return -EPERM;
67262+#endif
67263+
67264 if (syslog_action_restricted(type)) {
67265 if (capable(CAP_SYSLOG))
67266 return 0;
67267diff --git a/kernel/profile.c b/kernel/profile.c
67268index 76b8e77..a2930e8 100644
67269--- a/kernel/profile.c
67270+++ b/kernel/profile.c
67271@@ -39,7 +39,7 @@ struct profile_hit {
67272 /* Oprofile timer tick hook */
67273 static int (*timer_hook)(struct pt_regs *) __read_mostly;
67274
67275-static atomic_t *prof_buffer;
67276+static atomic_unchecked_t *prof_buffer;
67277 static unsigned long prof_len, prof_shift;
67278
67279 int prof_on __read_mostly;
67280@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
67281 hits[i].pc = 0;
67282 continue;
67283 }
67284- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67285+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67286 hits[i].hits = hits[i].pc = 0;
67287 }
67288 }
67289@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67290 * Add the current hit(s) and flush the write-queue out
67291 * to the global buffer:
67292 */
67293- atomic_add(nr_hits, &prof_buffer[pc]);
67294+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
67295 for (i = 0; i < NR_PROFILE_HIT; ++i) {
67296- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67297+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67298 hits[i].pc = hits[i].hits = 0;
67299 }
67300 out:
67301@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67302 {
67303 unsigned long pc;
67304 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
67305- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67306+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67307 }
67308 #endif /* !CONFIG_SMP */
67309
67310@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
67311 return -EFAULT;
67312 buf++; p++; count--; read++;
67313 }
67314- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
67315+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
67316 if (copy_to_user(buf, (void *)pnt, count))
67317 return -EFAULT;
67318 read += count;
67319@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
67320 }
67321 #endif
67322 profile_discard_flip_buffers();
67323- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
67324+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
67325 return count;
67326 }
67327
67328diff --git a/kernel/ptrace.c b/kernel/ptrace.c
67329index ee8d49b..bd3d790 100644
67330--- a/kernel/ptrace.c
67331+++ b/kernel/ptrace.c
67332@@ -280,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
67333
67334 if (seize)
67335 flags |= PT_SEIZED;
67336- if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
67337+ if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
67338 flags |= PT_PTRACE_CAP;
67339 task->ptrace = flags;
67340
67341@@ -487,7 +487,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
67342 break;
67343 return -EIO;
67344 }
67345- if (copy_to_user(dst, buf, retval))
67346+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
67347 return -EFAULT;
67348 copied += retval;
67349 src += retval;
67350@@ -672,7 +672,7 @@ int ptrace_request(struct task_struct *child, long request,
67351 bool seized = child->ptrace & PT_SEIZED;
67352 int ret = -EIO;
67353 siginfo_t siginfo, *si;
67354- void __user *datavp = (void __user *) data;
67355+ void __user *datavp = (__force void __user *) data;
67356 unsigned long __user *datalp = datavp;
67357 unsigned long flags;
67358
67359@@ -874,14 +874,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
67360 goto out;
67361 }
67362
67363+ if (gr_handle_ptrace(child, request)) {
67364+ ret = -EPERM;
67365+ goto out_put_task_struct;
67366+ }
67367+
67368 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67369 ret = ptrace_attach(child, request, addr, data);
67370 /*
67371 * Some architectures need to do book-keeping after
67372 * a ptrace attach.
67373 */
67374- if (!ret)
67375+ if (!ret) {
67376 arch_ptrace_attach(child);
67377+ gr_audit_ptrace(child);
67378+ }
67379 goto out_put_task_struct;
67380 }
67381
67382@@ -907,7 +914,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
67383 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
67384 if (copied != sizeof(tmp))
67385 return -EIO;
67386- return put_user(tmp, (unsigned long __user *)data);
67387+ return put_user(tmp, (__force unsigned long __user *)data);
67388 }
67389
67390 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
67391@@ -1017,14 +1024,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
67392 goto out;
67393 }
67394
67395+ if (gr_handle_ptrace(child, request)) {
67396+ ret = -EPERM;
67397+ goto out_put_task_struct;
67398+ }
67399+
67400 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67401 ret = ptrace_attach(child, request, addr, data);
67402 /*
67403 * Some architectures need to do book-keeping after
67404 * a ptrace attach.
67405 */
67406- if (!ret)
67407+ if (!ret) {
67408 arch_ptrace_attach(child);
67409+ gr_audit_ptrace(child);
67410+ }
67411 goto out_put_task_struct;
67412 }
67413
67414diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
67415index 37a5444..eec170a 100644
67416--- a/kernel/rcutiny.c
67417+++ b/kernel/rcutiny.c
67418@@ -46,7 +46,7 @@
67419 struct rcu_ctrlblk;
67420 static void invoke_rcu_callbacks(void);
67421 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
67422-static void rcu_process_callbacks(struct softirq_action *unused);
67423+static void rcu_process_callbacks(void);
67424 static void __call_rcu(struct rcu_head *head,
67425 void (*func)(struct rcu_head *rcu),
67426 struct rcu_ctrlblk *rcp);
67427@@ -307,7 +307,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
67428 rcu_is_callbacks_kthread()));
67429 }
67430
67431-static void rcu_process_callbacks(struct softirq_action *unused)
67432+static void rcu_process_callbacks(void)
67433 {
67434 __rcu_process_callbacks(&rcu_sched_ctrlblk);
67435 __rcu_process_callbacks(&rcu_bh_ctrlblk);
67436diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
67437index 22ecea0..3789898 100644
67438--- a/kernel/rcutiny_plugin.h
67439+++ b/kernel/rcutiny_plugin.h
67440@@ -955,7 +955,7 @@ static int rcu_kthread(void *arg)
67441 have_rcu_kthread_work = morework;
67442 local_irq_restore(flags);
67443 if (work)
67444- rcu_process_callbacks(NULL);
67445+ rcu_process_callbacks();
67446 schedule_timeout_interruptible(1); /* Leave CPU for others. */
67447 }
67448
67449diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
67450index a89b381..efdcad8 100644
67451--- a/kernel/rcutorture.c
67452+++ b/kernel/rcutorture.c
67453@@ -158,12 +158,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
67454 { 0 };
67455 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
67456 { 0 };
67457-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67458-static atomic_t n_rcu_torture_alloc;
67459-static atomic_t n_rcu_torture_alloc_fail;
67460-static atomic_t n_rcu_torture_free;
67461-static atomic_t n_rcu_torture_mberror;
67462-static atomic_t n_rcu_torture_error;
67463+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67464+static atomic_unchecked_t n_rcu_torture_alloc;
67465+static atomic_unchecked_t n_rcu_torture_alloc_fail;
67466+static atomic_unchecked_t n_rcu_torture_free;
67467+static atomic_unchecked_t n_rcu_torture_mberror;
67468+static atomic_unchecked_t n_rcu_torture_error;
67469 static long n_rcu_torture_boost_ktrerror;
67470 static long n_rcu_torture_boost_rterror;
67471 static long n_rcu_torture_boost_failure;
67472@@ -253,11 +253,11 @@ rcu_torture_alloc(void)
67473
67474 spin_lock_bh(&rcu_torture_lock);
67475 if (list_empty(&rcu_torture_freelist)) {
67476- atomic_inc(&n_rcu_torture_alloc_fail);
67477+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
67478 spin_unlock_bh(&rcu_torture_lock);
67479 return NULL;
67480 }
67481- atomic_inc(&n_rcu_torture_alloc);
67482+ atomic_inc_unchecked(&n_rcu_torture_alloc);
67483 p = rcu_torture_freelist.next;
67484 list_del_init(p);
67485 spin_unlock_bh(&rcu_torture_lock);
67486@@ -270,7 +270,7 @@ rcu_torture_alloc(void)
67487 static void
67488 rcu_torture_free(struct rcu_torture *p)
67489 {
67490- atomic_inc(&n_rcu_torture_free);
67491+ atomic_inc_unchecked(&n_rcu_torture_free);
67492 spin_lock_bh(&rcu_torture_lock);
67493 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
67494 spin_unlock_bh(&rcu_torture_lock);
67495@@ -390,7 +390,7 @@ rcu_torture_cb(struct rcu_head *p)
67496 i = rp->rtort_pipe_count;
67497 if (i > RCU_TORTURE_PIPE_LEN)
67498 i = RCU_TORTURE_PIPE_LEN;
67499- atomic_inc(&rcu_torture_wcount[i]);
67500+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
67501 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67502 rp->rtort_mbtest = 0;
67503 rcu_torture_free(rp);
67504@@ -437,7 +437,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
67505 i = rp->rtort_pipe_count;
67506 if (i > RCU_TORTURE_PIPE_LEN)
67507 i = RCU_TORTURE_PIPE_LEN;
67508- atomic_inc(&rcu_torture_wcount[i]);
67509+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
67510 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67511 rp->rtort_mbtest = 0;
67512 list_del(&rp->rtort_free);
67513@@ -926,7 +926,7 @@ rcu_torture_writer(void *arg)
67514 i = old_rp->rtort_pipe_count;
67515 if (i > RCU_TORTURE_PIPE_LEN)
67516 i = RCU_TORTURE_PIPE_LEN;
67517- atomic_inc(&rcu_torture_wcount[i]);
67518+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
67519 old_rp->rtort_pipe_count++;
67520 cur_ops->deferred_free(old_rp);
67521 }
67522@@ -1007,7 +1007,7 @@ static void rcu_torture_timer(unsigned long unused)
67523 }
67524 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
67525 if (p->rtort_mbtest == 0)
67526- atomic_inc(&n_rcu_torture_mberror);
67527+ atomic_inc_unchecked(&n_rcu_torture_mberror);
67528 spin_lock(&rand_lock);
67529 cur_ops->read_delay(&rand);
67530 n_rcu_torture_timers++;
67531@@ -1071,7 +1071,7 @@ rcu_torture_reader(void *arg)
67532 }
67533 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
67534 if (p->rtort_mbtest == 0)
67535- atomic_inc(&n_rcu_torture_mberror);
67536+ atomic_inc_unchecked(&n_rcu_torture_mberror);
67537 cur_ops->read_delay(&rand);
67538 preempt_disable();
67539 pipe_count = p->rtort_pipe_count;
67540@@ -1133,10 +1133,10 @@ rcu_torture_printk(char *page)
67541 rcu_torture_current,
67542 rcu_torture_current_version,
67543 list_empty(&rcu_torture_freelist),
67544- atomic_read(&n_rcu_torture_alloc),
67545- atomic_read(&n_rcu_torture_alloc_fail),
67546- atomic_read(&n_rcu_torture_free),
67547- atomic_read(&n_rcu_torture_mberror),
67548+ atomic_read_unchecked(&n_rcu_torture_alloc),
67549+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
67550+ atomic_read_unchecked(&n_rcu_torture_free),
67551+ atomic_read_unchecked(&n_rcu_torture_mberror),
67552 n_rcu_torture_boost_ktrerror,
67553 n_rcu_torture_boost_rterror,
67554 n_rcu_torture_boost_failure,
67555@@ -1146,7 +1146,7 @@ rcu_torture_printk(char *page)
67556 n_online_attempts,
67557 n_offline_successes,
67558 n_offline_attempts);
67559- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
67560+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
67561 n_rcu_torture_boost_ktrerror != 0 ||
67562 n_rcu_torture_boost_rterror != 0 ||
67563 n_rcu_torture_boost_failure != 0)
67564@@ -1154,7 +1154,7 @@ rcu_torture_printk(char *page)
67565 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
67566 if (i > 1) {
67567 cnt += sprintf(&page[cnt], "!!! ");
67568- atomic_inc(&n_rcu_torture_error);
67569+ atomic_inc_unchecked(&n_rcu_torture_error);
67570 WARN_ON_ONCE(1);
67571 }
67572 cnt += sprintf(&page[cnt], "Reader Pipe: ");
67573@@ -1168,7 +1168,7 @@ rcu_torture_printk(char *page)
67574 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
67575 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67576 cnt += sprintf(&page[cnt], " %d",
67577- atomic_read(&rcu_torture_wcount[i]));
67578+ atomic_read_unchecked(&rcu_torture_wcount[i]));
67579 }
67580 cnt += sprintf(&page[cnt], "\n");
67581 if (cur_ops->stats)
67582@@ -1676,7 +1676,7 @@ rcu_torture_cleanup(void)
67583
67584 if (cur_ops->cleanup)
67585 cur_ops->cleanup();
67586- if (atomic_read(&n_rcu_torture_error))
67587+ if (atomic_read_unchecked(&n_rcu_torture_error))
67588 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
67589 else if (n_online_successes != n_online_attempts ||
67590 n_offline_successes != n_offline_attempts)
67591@@ -1744,17 +1744,17 @@ rcu_torture_init(void)
67592
67593 rcu_torture_current = NULL;
67594 rcu_torture_current_version = 0;
67595- atomic_set(&n_rcu_torture_alloc, 0);
67596- atomic_set(&n_rcu_torture_alloc_fail, 0);
67597- atomic_set(&n_rcu_torture_free, 0);
67598- atomic_set(&n_rcu_torture_mberror, 0);
67599- atomic_set(&n_rcu_torture_error, 0);
67600+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
67601+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
67602+ atomic_set_unchecked(&n_rcu_torture_free, 0);
67603+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
67604+ atomic_set_unchecked(&n_rcu_torture_error, 0);
67605 n_rcu_torture_boost_ktrerror = 0;
67606 n_rcu_torture_boost_rterror = 0;
67607 n_rcu_torture_boost_failure = 0;
67608 n_rcu_torture_boosts = 0;
67609 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
67610- atomic_set(&rcu_torture_wcount[i], 0);
67611+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
67612 for_each_possible_cpu(cpu) {
67613 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67614 per_cpu(rcu_torture_count, cpu)[i] = 0;
67615diff --git a/kernel/rcutree.c b/kernel/rcutree.c
67616index d0c5baf..109b2e7 100644
67617--- a/kernel/rcutree.c
67618+++ b/kernel/rcutree.c
67619@@ -357,9 +357,9 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
67620 rcu_prepare_for_idle(smp_processor_id());
67621 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67622 smp_mb__before_atomic_inc(); /* See above. */
67623- atomic_inc(&rdtp->dynticks);
67624+ atomic_inc_unchecked(&rdtp->dynticks);
67625 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
67626- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67627+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67628
67629 /*
67630 * The idle task is not permitted to enter the idle loop while
67631@@ -448,10 +448,10 @@ void rcu_irq_exit(void)
67632 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
67633 {
67634 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
67635- atomic_inc(&rdtp->dynticks);
67636+ atomic_inc_unchecked(&rdtp->dynticks);
67637 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67638 smp_mb__after_atomic_inc(); /* See above. */
67639- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67640+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67641 rcu_cleanup_after_idle(smp_processor_id());
67642 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
67643 if (!is_idle_task(current)) {
67644@@ -545,14 +545,14 @@ void rcu_nmi_enter(void)
67645 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
67646
67647 if (rdtp->dynticks_nmi_nesting == 0 &&
67648- (atomic_read(&rdtp->dynticks) & 0x1))
67649+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
67650 return;
67651 rdtp->dynticks_nmi_nesting++;
67652 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
67653- atomic_inc(&rdtp->dynticks);
67654+ atomic_inc_unchecked(&rdtp->dynticks);
67655 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67656 smp_mb__after_atomic_inc(); /* See above. */
67657- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67658+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67659 }
67660
67661 /**
67662@@ -571,9 +571,9 @@ void rcu_nmi_exit(void)
67663 return;
67664 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67665 smp_mb__before_atomic_inc(); /* See above. */
67666- atomic_inc(&rdtp->dynticks);
67667+ atomic_inc_unchecked(&rdtp->dynticks);
67668 smp_mb__after_atomic_inc(); /* Force delay to next write. */
67669- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67670+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67671 }
67672
67673 #ifdef CONFIG_PROVE_RCU
67674@@ -589,7 +589,7 @@ int rcu_is_cpu_idle(void)
67675 int ret;
67676
67677 preempt_disable();
67678- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
67679+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
67680 preempt_enable();
67681 return ret;
67682 }
67683@@ -659,7 +659,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
67684 */
67685 static int dyntick_save_progress_counter(struct rcu_data *rdp)
67686 {
67687- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
67688+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
67689 return (rdp->dynticks_snap & 0x1) == 0;
67690 }
67691
67692@@ -674,7 +674,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
67693 unsigned int curr;
67694 unsigned int snap;
67695
67696- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
67697+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
67698 snap = (unsigned int)rdp->dynticks_snap;
67699
67700 /*
67701@@ -704,10 +704,10 @@ static int jiffies_till_stall_check(void)
67702 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
67703 */
67704 if (till_stall_check < 3) {
67705- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
67706+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
67707 till_stall_check = 3;
67708 } else if (till_stall_check > 300) {
67709- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
67710+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
67711 till_stall_check = 300;
67712 }
67713 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
67714@@ -1766,7 +1766,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
67715 /*
67716 * Do RCU core processing for the current CPU.
67717 */
67718-static void rcu_process_callbacks(struct softirq_action *unused)
67719+static void rcu_process_callbacks(void)
67720 {
67721 trace_rcu_utilization("Start RCU core");
67722 __rcu_process_callbacks(&rcu_sched_state,
67723@@ -1949,8 +1949,8 @@ void synchronize_rcu_bh(void)
67724 }
67725 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
67726
67727-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
67728-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
67729+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
67730+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
67731
67732 static int synchronize_sched_expedited_cpu_stop(void *data)
67733 {
67734@@ -2011,7 +2011,7 @@ void synchronize_sched_expedited(void)
67735 int firstsnap, s, snap, trycount = 0;
67736
67737 /* Note that atomic_inc_return() implies full memory barrier. */
67738- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
67739+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
67740 get_online_cpus();
67741 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
67742
67743@@ -2033,7 +2033,7 @@ void synchronize_sched_expedited(void)
67744 }
67745
67746 /* Check to see if someone else did our work for us. */
67747- s = atomic_read(&sync_sched_expedited_done);
67748+ s = atomic_read_unchecked(&sync_sched_expedited_done);
67749 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
67750 smp_mb(); /* ensure test happens before caller kfree */
67751 return;
67752@@ -2048,7 +2048,7 @@ void synchronize_sched_expedited(void)
67753 * grace period works for us.
67754 */
67755 get_online_cpus();
67756- snap = atomic_read(&sync_sched_expedited_started);
67757+ snap = atomic_read_unchecked(&sync_sched_expedited_started);
67758 smp_mb(); /* ensure read is before try_stop_cpus(). */
67759 }
67760
67761@@ -2059,12 +2059,12 @@ void synchronize_sched_expedited(void)
67762 * than we did beat us to the punch.
67763 */
67764 do {
67765- s = atomic_read(&sync_sched_expedited_done);
67766+ s = atomic_read_unchecked(&sync_sched_expedited_done);
67767 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
67768 smp_mb(); /* ensure test happens before caller kfree */
67769 break;
67770 }
67771- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
67772+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
67773
67774 put_online_cpus();
67775 }
67776@@ -2262,7 +2262,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
67777 rdp->qlen = 0;
67778 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
67779 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
67780- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
67781+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
67782 rdp->cpu = cpu;
67783 rdp->rsp = rsp;
67784 raw_spin_unlock_irqrestore(&rnp->lock, flags);
67785@@ -2290,8 +2290,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
67786 rdp->n_force_qs_snap = rsp->n_force_qs;
67787 rdp->blimit = blimit;
67788 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
67789- atomic_set(&rdp->dynticks->dynticks,
67790- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
67791+ atomic_set_unchecked(&rdp->dynticks->dynticks,
67792+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
67793 rcu_prepare_for_idle_init(cpu);
67794 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
67795
67796diff --git a/kernel/rcutree.h b/kernel/rcutree.h
67797index cdd1be0..5b2efb4 100644
67798--- a/kernel/rcutree.h
67799+++ b/kernel/rcutree.h
67800@@ -87,7 +87,7 @@ struct rcu_dynticks {
67801 long long dynticks_nesting; /* Track irq/process nesting level. */
67802 /* Process level is worth LLONG_MAX/2. */
67803 int dynticks_nmi_nesting; /* Track NMI nesting level. */
67804- atomic_t dynticks; /* Even value for idle, else odd. */
67805+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
67806 };
67807
67808 /* RCU's kthread states for tracing. */
67809diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
67810index c023464..7f57225 100644
67811--- a/kernel/rcutree_plugin.h
67812+++ b/kernel/rcutree_plugin.h
67813@@ -909,7 +909,7 @@ void synchronize_rcu_expedited(void)
67814
67815 /* Clean up and exit. */
67816 smp_mb(); /* ensure expedited GP seen before counter increment. */
67817- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
67818+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
67819 unlock_mb_ret:
67820 mutex_unlock(&sync_rcu_preempt_exp_mutex);
67821 mb_ret:
67822diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
67823index ed459ed..a03c3fa 100644
67824--- a/kernel/rcutree_trace.c
67825+++ b/kernel/rcutree_trace.c
67826@@ -68,7 +68,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
67827 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
67828 rdp->qs_pending);
67829 seq_printf(m, " dt=%d/%llx/%d df=%lu",
67830- atomic_read(&rdp->dynticks->dynticks),
67831+ atomic_read_unchecked(&rdp->dynticks->dynticks),
67832 rdp->dynticks->dynticks_nesting,
67833 rdp->dynticks->dynticks_nmi_nesting,
67834 rdp->dynticks_fqs);
67835@@ -140,7 +140,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
67836 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
67837 rdp->qs_pending);
67838 seq_printf(m, ",%d,%llx,%d,%lu",
67839- atomic_read(&rdp->dynticks->dynticks),
67840+ atomic_read_unchecked(&rdp->dynticks->dynticks),
67841 rdp->dynticks->dynticks_nesting,
67842 rdp->dynticks->dynticks_nmi_nesting,
67843 rdp->dynticks_fqs);
67844diff --git a/kernel/resource.c b/kernel/resource.c
67845index 7e8ea66..1efd11f 100644
67846--- a/kernel/resource.c
67847+++ b/kernel/resource.c
67848@@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
67849
67850 static int __init ioresources_init(void)
67851 {
67852+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67853+#ifdef CONFIG_GRKERNSEC_PROC_USER
67854+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
67855+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
67856+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67857+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
67858+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
67859+#endif
67860+#else
67861 proc_create("ioports", 0, NULL, &proc_ioports_operations);
67862 proc_create("iomem", 0, NULL, &proc_iomem_operations);
67863+#endif
67864 return 0;
67865 }
67866 __initcall(ioresources_init);
67867diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
67868index 98ec494..4241d6d 100644
67869--- a/kernel/rtmutex-tester.c
67870+++ b/kernel/rtmutex-tester.c
67871@@ -20,7 +20,7 @@
67872 #define MAX_RT_TEST_MUTEXES 8
67873
67874 static spinlock_t rttest_lock;
67875-static atomic_t rttest_event;
67876+static atomic_unchecked_t rttest_event;
67877
67878 struct test_thread_data {
67879 int opcode;
67880@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67881
67882 case RTTEST_LOCKCONT:
67883 td->mutexes[td->opdata] = 1;
67884- td->event = atomic_add_return(1, &rttest_event);
67885+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67886 return 0;
67887
67888 case RTTEST_RESET:
67889@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67890 return 0;
67891
67892 case RTTEST_RESETEVENT:
67893- atomic_set(&rttest_event, 0);
67894+ atomic_set_unchecked(&rttest_event, 0);
67895 return 0;
67896
67897 default:
67898@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67899 return ret;
67900
67901 td->mutexes[id] = 1;
67902- td->event = atomic_add_return(1, &rttest_event);
67903+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67904 rt_mutex_lock(&mutexes[id]);
67905- td->event = atomic_add_return(1, &rttest_event);
67906+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67907 td->mutexes[id] = 4;
67908 return 0;
67909
67910@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67911 return ret;
67912
67913 td->mutexes[id] = 1;
67914- td->event = atomic_add_return(1, &rttest_event);
67915+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67916 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
67917- td->event = atomic_add_return(1, &rttest_event);
67918+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67919 td->mutexes[id] = ret ? 0 : 4;
67920 return ret ? -EINTR : 0;
67921
67922@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67923 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
67924 return ret;
67925
67926- td->event = atomic_add_return(1, &rttest_event);
67927+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67928 rt_mutex_unlock(&mutexes[id]);
67929- td->event = atomic_add_return(1, &rttest_event);
67930+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67931 td->mutexes[id] = 0;
67932 return 0;
67933
67934@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67935 break;
67936
67937 td->mutexes[dat] = 2;
67938- td->event = atomic_add_return(1, &rttest_event);
67939+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67940 break;
67941
67942 default:
67943@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67944 return;
67945
67946 td->mutexes[dat] = 3;
67947- td->event = atomic_add_return(1, &rttest_event);
67948+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67949 break;
67950
67951 case RTTEST_LOCKNOWAIT:
67952@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67953 return;
67954
67955 td->mutexes[dat] = 1;
67956- td->event = atomic_add_return(1, &rttest_event);
67957+ td->event = atomic_add_return_unchecked(1, &rttest_event);
67958 return;
67959
67960 default:
67961diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
67962index 0984a21..939f183 100644
67963--- a/kernel/sched/auto_group.c
67964+++ b/kernel/sched/auto_group.c
67965@@ -11,7 +11,7 @@
67966
67967 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
67968 static struct autogroup autogroup_default;
67969-static atomic_t autogroup_seq_nr;
67970+static atomic_unchecked_t autogroup_seq_nr;
67971
67972 void __init autogroup_init(struct task_struct *init_task)
67973 {
67974@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
67975
67976 kref_init(&ag->kref);
67977 init_rwsem(&ag->lock);
67978- ag->id = atomic_inc_return(&autogroup_seq_nr);
67979+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
67980 ag->tg = tg;
67981 #ifdef CONFIG_RT_GROUP_SCHED
67982 /*
67983diff --git a/kernel/sched/core.c b/kernel/sched/core.c
67984index 2000e06..79cf3d8 100644
67985--- a/kernel/sched/core.c
67986+++ b/kernel/sched/core.c
67987@@ -3907,6 +3907,8 @@ int can_nice(const struct task_struct *p, const int nice)
67988 /* convert nice value [19,-20] to rlimit style value [1,40] */
67989 int nice_rlim = 20 - nice;
67990
67991+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
67992+
67993 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
67994 capable(CAP_SYS_NICE));
67995 }
67996@@ -3940,7 +3942,8 @@ SYSCALL_DEFINE1(nice, int, increment)
67997 if (nice > 19)
67998 nice = 19;
67999
68000- if (increment < 0 && !can_nice(current, nice))
68001+ if (increment < 0 && (!can_nice(current, nice) ||
68002+ gr_handle_chroot_nice()))
68003 return -EPERM;
68004
68005 retval = security_task_setnice(current, nice);
68006@@ -4097,6 +4100,7 @@ recheck:
68007 unsigned long rlim_rtprio =
68008 task_rlimit(p, RLIMIT_RTPRIO);
68009
68010+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
68011 /* can't set/change the rt policy */
68012 if (policy != p->policy && !rlim_rtprio)
68013 return -EPERM;
68014diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
68015index e955364..eacd2a4 100644
68016--- a/kernel/sched/fair.c
68017+++ b/kernel/sched/fair.c
68018@@ -5107,7 +5107,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
68019 * run_rebalance_domains is triggered when needed from the scheduler tick.
68020 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
68021 */
68022-static void run_rebalance_domains(struct softirq_action *h)
68023+static void run_rebalance_domains(void)
68024 {
68025 int this_cpu = smp_processor_id();
68026 struct rq *this_rq = cpu_rq(this_cpu);
68027diff --git a/kernel/signal.c b/kernel/signal.c
68028index 17afcaf..4500b05 100644
68029--- a/kernel/signal.c
68030+++ b/kernel/signal.c
68031@@ -47,12 +47,12 @@ static struct kmem_cache *sigqueue_cachep;
68032
68033 int print_fatal_signals __read_mostly;
68034
68035-static void __user *sig_handler(struct task_struct *t, int sig)
68036+static __sighandler_t sig_handler(struct task_struct *t, int sig)
68037 {
68038 return t->sighand->action[sig - 1].sa.sa_handler;
68039 }
68040
68041-static int sig_handler_ignored(void __user *handler, int sig)
68042+static int sig_handler_ignored(__sighandler_t handler, int sig)
68043 {
68044 /* Is it explicitly or implicitly ignored? */
68045 return handler == SIG_IGN ||
68046@@ -61,7 +61,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
68047
68048 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
68049 {
68050- void __user *handler;
68051+ __sighandler_t handler;
68052
68053 handler = sig_handler(t, sig);
68054
68055@@ -365,6 +365,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
68056 atomic_inc(&user->sigpending);
68057 rcu_read_unlock();
68058
68059+ if (!override_rlimit)
68060+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
68061+
68062 if (override_rlimit ||
68063 atomic_read(&user->sigpending) <=
68064 task_rlimit(t, RLIMIT_SIGPENDING)) {
68065@@ -489,7 +492,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
68066
68067 int unhandled_signal(struct task_struct *tsk, int sig)
68068 {
68069- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
68070+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
68071 if (is_global_init(tsk))
68072 return 1;
68073 if (handler != SIG_IGN && handler != SIG_DFL)
68074@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
68075 }
68076 }
68077
68078+ /* allow glibc communication via tgkill to other threads in our
68079+ thread group */
68080+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
68081+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
68082+ && gr_handle_signal(t, sig))
68083+ return -EPERM;
68084+
68085 return security_task_kill(t, info, sig, 0);
68086 }
68087
68088@@ -1204,7 +1214,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68089 return send_signal(sig, info, p, 1);
68090 }
68091
68092-static int
68093+int
68094 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68095 {
68096 return send_signal(sig, info, t, 0);
68097@@ -1241,6 +1251,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68098 unsigned long int flags;
68099 int ret, blocked, ignored;
68100 struct k_sigaction *action;
68101+ int is_unhandled = 0;
68102
68103 spin_lock_irqsave(&t->sighand->siglock, flags);
68104 action = &t->sighand->action[sig-1];
68105@@ -1255,9 +1266,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68106 }
68107 if (action->sa.sa_handler == SIG_DFL)
68108 t->signal->flags &= ~SIGNAL_UNKILLABLE;
68109+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
68110+ is_unhandled = 1;
68111 ret = specific_send_sig_info(sig, info, t);
68112 spin_unlock_irqrestore(&t->sighand->siglock, flags);
68113
68114+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
68115+ normal operation */
68116+ if (is_unhandled) {
68117+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
68118+ gr_handle_crash(t, sig);
68119+ }
68120+
68121 return ret;
68122 }
68123
68124@@ -1324,8 +1344,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68125 ret = check_kill_permission(sig, info, p);
68126 rcu_read_unlock();
68127
68128- if (!ret && sig)
68129+ if (!ret && sig) {
68130 ret = do_send_sig_info(sig, info, p, true);
68131+ if (!ret)
68132+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
68133+ }
68134
68135 return ret;
68136 }
68137@@ -2840,7 +2863,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
68138 int error = -ESRCH;
68139
68140 rcu_read_lock();
68141- p = find_task_by_vpid(pid);
68142+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68143+ /* allow glibc communication via tgkill to other threads in our
68144+ thread group */
68145+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
68146+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
68147+ p = find_task_by_vpid_unrestricted(pid);
68148+ else
68149+#endif
68150+ p = find_task_by_vpid(pid);
68151 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
68152 error = check_kill_permission(sig, info, p);
68153 /*
68154diff --git a/kernel/smp.c b/kernel/smp.c
68155index 2f8b10e..a41bc14 100644
68156--- a/kernel/smp.c
68157+++ b/kernel/smp.c
68158@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
68159 }
68160 EXPORT_SYMBOL(smp_call_function);
68161
68162-void ipi_call_lock(void)
68163+void ipi_call_lock(void) __acquires(call_function.lock)
68164 {
68165 raw_spin_lock(&call_function.lock);
68166 }
68167
68168-void ipi_call_unlock(void)
68169+void ipi_call_unlock(void) __releases(call_function.lock)
68170 {
68171 raw_spin_unlock(&call_function.lock);
68172 }
68173
68174-void ipi_call_lock_irq(void)
68175+void ipi_call_lock_irq(void) __acquires(call_function.lock)
68176 {
68177 raw_spin_lock_irq(&call_function.lock);
68178 }
68179
68180-void ipi_call_unlock_irq(void)
68181+void ipi_call_unlock_irq(void) __releases(call_function.lock)
68182 {
68183 raw_spin_unlock_irq(&call_function.lock);
68184 }
68185diff --git a/kernel/softirq.c b/kernel/softirq.c
68186index 671f959..91c51cb 100644
68187--- a/kernel/softirq.c
68188+++ b/kernel/softirq.c
68189@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
68190
68191 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
68192
68193-char *softirq_to_name[NR_SOFTIRQS] = {
68194+const char * const softirq_to_name[NR_SOFTIRQS] = {
68195 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
68196 "TASKLET", "SCHED", "HRTIMER", "RCU"
68197 };
68198@@ -235,7 +235,7 @@ restart:
68199 kstat_incr_softirqs_this_cpu(vec_nr);
68200
68201 trace_softirq_entry(vec_nr);
68202- h->action(h);
68203+ h->action();
68204 trace_softirq_exit(vec_nr);
68205 if (unlikely(prev_count != preempt_count())) {
68206 printk(KERN_ERR "huh, entered softirq %u %s %p"
68207@@ -381,9 +381,11 @@ void __raise_softirq_irqoff(unsigned int nr)
68208 or_softirq_pending(1UL << nr);
68209 }
68210
68211-void open_softirq(int nr, void (*action)(struct softirq_action *))
68212+void open_softirq(int nr, void (*action)(void))
68213 {
68214- softirq_vec[nr].action = action;
68215+ pax_open_kernel();
68216+ *(void **)&softirq_vec[nr].action = action;
68217+ pax_close_kernel();
68218 }
68219
68220 /*
68221@@ -437,7 +439,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
68222
68223 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
68224
68225-static void tasklet_action(struct softirq_action *a)
68226+static void tasklet_action(void)
68227 {
68228 struct tasklet_struct *list;
68229
68230@@ -472,7 +474,7 @@ static void tasklet_action(struct softirq_action *a)
68231 }
68232 }
68233
68234-static void tasklet_hi_action(struct softirq_action *a)
68235+static void tasklet_hi_action(void)
68236 {
68237 struct tasklet_struct *list;
68238
68239diff --git a/kernel/sys.c b/kernel/sys.c
68240index e7006eb..8fb7c51 100644
68241--- a/kernel/sys.c
68242+++ b/kernel/sys.c
68243@@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
68244 error = -EACCES;
68245 goto out;
68246 }
68247+
68248+ if (gr_handle_chroot_setpriority(p, niceval)) {
68249+ error = -EACCES;
68250+ goto out;
68251+ }
68252+
68253 no_nice = security_task_setnice(p, niceval);
68254 if (no_nice) {
68255 error = no_nice;
68256@@ -581,6 +587,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
68257 goto error;
68258 }
68259
68260+ if (gr_check_group_change(new->gid, new->egid, -1))
68261+ goto error;
68262+
68263 if (rgid != (gid_t) -1 ||
68264 (egid != (gid_t) -1 && egid != old->gid))
68265 new->sgid = new->egid;
68266@@ -610,6 +619,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
68267 old = current_cred();
68268
68269 retval = -EPERM;
68270+
68271+ if (gr_check_group_change(gid, gid, gid))
68272+ goto error;
68273+
68274 if (nsown_capable(CAP_SETGID))
68275 new->gid = new->egid = new->sgid = new->fsgid = gid;
68276 else if (gid == old->gid || gid == old->sgid)
68277@@ -627,7 +640,7 @@ error:
68278 /*
68279 * change the user struct in a credentials set to match the new UID
68280 */
68281-static int set_user(struct cred *new)
68282+int set_user(struct cred *new)
68283 {
68284 struct user_struct *new_user;
68285
68286@@ -697,6 +710,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
68287 goto error;
68288 }
68289
68290+ if (gr_check_user_change(new->uid, new->euid, -1))
68291+ goto error;
68292+
68293 if (new->uid != old->uid) {
68294 retval = set_user(new);
68295 if (retval < 0)
68296@@ -741,6 +757,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
68297 old = current_cred();
68298
68299 retval = -EPERM;
68300+
68301+ if (gr_check_crash_uid(uid))
68302+ goto error;
68303+ if (gr_check_user_change(uid, uid, uid))
68304+ goto error;
68305+
68306 if (nsown_capable(CAP_SETUID)) {
68307 new->suid = new->uid = uid;
68308 if (uid != old->uid) {
68309@@ -795,6 +817,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
68310 goto error;
68311 }
68312
68313+ if (gr_check_user_change(ruid, euid, -1))
68314+ goto error;
68315+
68316 if (ruid != (uid_t) -1) {
68317 new->uid = ruid;
68318 if (ruid != old->uid) {
68319@@ -859,6 +884,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
68320 goto error;
68321 }
68322
68323+ if (gr_check_group_change(rgid, egid, -1))
68324+ goto error;
68325+
68326 if (rgid != (gid_t) -1)
68327 new->gid = rgid;
68328 if (egid != (gid_t) -1)
68329@@ -905,6 +933,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68330 old = current_cred();
68331 old_fsuid = old->fsuid;
68332
68333+ if (gr_check_user_change(-1, -1, uid))
68334+ goto error;
68335+
68336 if (uid == old->uid || uid == old->euid ||
68337 uid == old->suid || uid == old->fsuid ||
68338 nsown_capable(CAP_SETUID)) {
68339@@ -915,6 +946,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68340 }
68341 }
68342
68343+error:
68344 abort_creds(new);
68345 return old_fsuid;
68346
68347@@ -941,12 +973,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
68348 if (gid == old->gid || gid == old->egid ||
68349 gid == old->sgid || gid == old->fsgid ||
68350 nsown_capable(CAP_SETGID)) {
68351+ if (gr_check_group_change(-1, -1, gid))
68352+ goto error;
68353+
68354 if (gid != old_fsgid) {
68355 new->fsgid = gid;
68356 goto change_okay;
68357 }
68358 }
68359
68360+error:
68361 abort_creds(new);
68362 return old_fsgid;
68363
68364@@ -1198,7 +1234,10 @@ static int override_release(char __user *release, int len)
68365 }
68366 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
68367 snprintf(buf, len, "2.6.%u%s", v, rest);
68368- ret = copy_to_user(release, buf, len);
68369+ if (len > sizeof(buf))
68370+ ret = -EFAULT;
68371+ else
68372+ ret = copy_to_user(release, buf, len);
68373 }
68374 return ret;
68375 }
68376@@ -1252,19 +1291,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
68377 return -EFAULT;
68378
68379 down_read(&uts_sem);
68380- error = __copy_to_user(&name->sysname, &utsname()->sysname,
68381+ error = __copy_to_user(name->sysname, &utsname()->sysname,
68382 __OLD_UTS_LEN);
68383 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
68384- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
68385+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
68386 __OLD_UTS_LEN);
68387 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
68388- error |= __copy_to_user(&name->release, &utsname()->release,
68389+ error |= __copy_to_user(name->release, &utsname()->release,
68390 __OLD_UTS_LEN);
68391 error |= __put_user(0, name->release + __OLD_UTS_LEN);
68392- error |= __copy_to_user(&name->version, &utsname()->version,
68393+ error |= __copy_to_user(name->version, &utsname()->version,
68394 __OLD_UTS_LEN);
68395 error |= __put_user(0, name->version + __OLD_UTS_LEN);
68396- error |= __copy_to_user(&name->machine, &utsname()->machine,
68397+ error |= __copy_to_user(name->machine, &utsname()->machine,
68398 __OLD_UTS_LEN);
68399 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
68400 up_read(&uts_sem);
68401@@ -1847,7 +1886,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
68402 error = get_dumpable(me->mm);
68403 break;
68404 case PR_SET_DUMPABLE:
68405- if (arg2 < 0 || arg2 > 1) {
68406+ if (arg2 > 1) {
68407 error = -EINVAL;
68408 break;
68409 }
68410diff --git a/kernel/sysctl.c b/kernel/sysctl.c
68411index 4ab1187..0b75ced 100644
68412--- a/kernel/sysctl.c
68413+++ b/kernel/sysctl.c
68414@@ -91,7 +91,6 @@
68415
68416
68417 #if defined(CONFIG_SYSCTL)
68418-
68419 /* External variables not in a header file. */
68420 extern int sysctl_overcommit_memory;
68421 extern int sysctl_overcommit_ratio;
68422@@ -169,10 +168,8 @@ static int proc_taint(struct ctl_table *table, int write,
68423 void __user *buffer, size_t *lenp, loff_t *ppos);
68424 #endif
68425
68426-#ifdef CONFIG_PRINTK
68427 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
68428 void __user *buffer, size_t *lenp, loff_t *ppos);
68429-#endif
68430
68431 #ifdef CONFIG_MAGIC_SYSRQ
68432 /* Note: sysrq code uses it's own private copy */
68433@@ -196,6 +193,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
68434
68435 #endif
68436
68437+extern struct ctl_table grsecurity_table[];
68438+
68439 static struct ctl_table kern_table[];
68440 static struct ctl_table vm_table[];
68441 static struct ctl_table fs_table[];
68442@@ -210,6 +209,20 @@ extern struct ctl_table epoll_table[];
68443 int sysctl_legacy_va_layout;
68444 #endif
68445
68446+#ifdef CONFIG_PAX_SOFTMODE
68447+static ctl_table pax_table[] = {
68448+ {
68449+ .procname = "softmode",
68450+ .data = &pax_softmode,
68451+ .maxlen = sizeof(unsigned int),
68452+ .mode = 0600,
68453+ .proc_handler = &proc_dointvec,
68454+ },
68455+
68456+ { }
68457+};
68458+#endif
68459+
68460 /* The default sysctl tables: */
68461
68462 static struct ctl_table sysctl_base_table[] = {
68463@@ -256,6 +269,22 @@ static int max_extfrag_threshold = 1000;
68464 #endif
68465
68466 static struct ctl_table kern_table[] = {
68467+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
68468+ {
68469+ .procname = "grsecurity",
68470+ .mode = 0500,
68471+ .child = grsecurity_table,
68472+ },
68473+#endif
68474+
68475+#ifdef CONFIG_PAX_SOFTMODE
68476+ {
68477+ .procname = "pax",
68478+ .mode = 0500,
68479+ .child = pax_table,
68480+ },
68481+#endif
68482+
68483 {
68484 .procname = "sched_child_runs_first",
68485 .data = &sysctl_sched_child_runs_first,
68486@@ -540,7 +569,7 @@ static struct ctl_table kern_table[] = {
68487 .data = &modprobe_path,
68488 .maxlen = KMOD_PATH_LEN,
68489 .mode = 0644,
68490- .proc_handler = proc_dostring,
68491+ .proc_handler = proc_dostring_modpriv,
68492 },
68493 {
68494 .procname = "modules_disabled",
68495@@ -707,16 +736,20 @@ static struct ctl_table kern_table[] = {
68496 .extra1 = &zero,
68497 .extra2 = &one,
68498 },
68499+#endif
68500 {
68501 .procname = "kptr_restrict",
68502 .data = &kptr_restrict,
68503 .maxlen = sizeof(int),
68504 .mode = 0644,
68505 .proc_handler = proc_dointvec_minmax_sysadmin,
68506+#ifdef CONFIG_GRKERNSEC_HIDESYM
68507+ .extra1 = &two,
68508+#else
68509 .extra1 = &zero,
68510+#endif
68511 .extra2 = &two,
68512 },
68513-#endif
68514 {
68515 .procname = "ngroups_max",
68516 .data = &ngroups_max,
68517@@ -1215,6 +1248,13 @@ static struct ctl_table vm_table[] = {
68518 .proc_handler = proc_dointvec_minmax,
68519 .extra1 = &zero,
68520 },
68521+ {
68522+ .procname = "heap_stack_gap",
68523+ .data = &sysctl_heap_stack_gap,
68524+ .maxlen = sizeof(sysctl_heap_stack_gap),
68525+ .mode = 0644,
68526+ .proc_handler = proc_doulongvec_minmax,
68527+ },
68528 #else
68529 {
68530 .procname = "nr_trim_pages",
68531@@ -1645,6 +1685,16 @@ int proc_dostring(struct ctl_table *table, int write,
68532 buffer, lenp, ppos);
68533 }
68534
68535+int proc_dostring_modpriv(struct ctl_table *table, int write,
68536+ void __user *buffer, size_t *lenp, loff_t *ppos)
68537+{
68538+ if (write && !capable(CAP_SYS_MODULE))
68539+ return -EPERM;
68540+
68541+ return _proc_do_string(table->data, table->maxlen, write,
68542+ buffer, lenp, ppos);
68543+}
68544+
68545 static size_t proc_skip_spaces(char **buf)
68546 {
68547 size_t ret;
68548@@ -1750,6 +1800,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
68549 len = strlen(tmp);
68550 if (len > *size)
68551 len = *size;
68552+ if (len > sizeof(tmp))
68553+ len = sizeof(tmp);
68554 if (copy_to_user(*buf, tmp, len))
68555 return -EFAULT;
68556 *size -= len;
68557@@ -1942,7 +1994,6 @@ static int proc_taint(struct ctl_table *table, int write,
68558 return err;
68559 }
68560
68561-#ifdef CONFIG_PRINTK
68562 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
68563 void __user *buffer, size_t *lenp, loff_t *ppos)
68564 {
68565@@ -1951,7 +2002,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
68566
68567 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
68568 }
68569-#endif
68570
68571 struct do_proc_dointvec_minmax_conv_param {
68572 int *min;
68573@@ -2066,8 +2116,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
68574 *i = val;
68575 } else {
68576 val = convdiv * (*i) / convmul;
68577- if (!first)
68578+ if (!first) {
68579 err = proc_put_char(&buffer, &left, '\t');
68580+ if (err)
68581+ break;
68582+ }
68583 err = proc_put_long(&buffer, &left, val, false);
68584 if (err)
68585 break;
68586@@ -2459,6 +2512,12 @@ int proc_dostring(struct ctl_table *table, int write,
68587 return -ENOSYS;
68588 }
68589
68590+int proc_dostring_modpriv(struct ctl_table *table, int write,
68591+ void __user *buffer, size_t *lenp, loff_t *ppos)
68592+{
68593+ return -ENOSYS;
68594+}
68595+
68596 int proc_dointvec(struct ctl_table *table, int write,
68597 void __user *buffer, size_t *lenp, loff_t *ppos)
68598 {
68599@@ -2515,5 +2574,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
68600 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
68601 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
68602 EXPORT_SYMBOL(proc_dostring);
68603+EXPORT_SYMBOL(proc_dostring_modpriv);
68604 EXPORT_SYMBOL(proc_doulongvec_minmax);
68605 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
68606diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
68607index a650694..aaeeb20 100644
68608--- a/kernel/sysctl_binary.c
68609+++ b/kernel/sysctl_binary.c
68610@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
68611 int i;
68612
68613 set_fs(KERNEL_DS);
68614- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68615+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68616 set_fs(old_fs);
68617 if (result < 0)
68618 goto out_kfree;
68619@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
68620 }
68621
68622 set_fs(KERNEL_DS);
68623- result = vfs_write(file, buffer, str - buffer, &pos);
68624+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68625 set_fs(old_fs);
68626 if (result < 0)
68627 goto out_kfree;
68628@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
68629 int i;
68630
68631 set_fs(KERNEL_DS);
68632- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68633+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68634 set_fs(old_fs);
68635 if (result < 0)
68636 goto out_kfree;
68637@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
68638 }
68639
68640 set_fs(KERNEL_DS);
68641- result = vfs_write(file, buffer, str - buffer, &pos);
68642+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68643 set_fs(old_fs);
68644 if (result < 0)
68645 goto out_kfree;
68646@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
68647 int i;
68648
68649 set_fs(KERNEL_DS);
68650- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68651+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68652 set_fs(old_fs);
68653 if (result < 0)
68654 goto out;
68655@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68656 __le16 dnaddr;
68657
68658 set_fs(KERNEL_DS);
68659- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68660+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68661 set_fs(old_fs);
68662 if (result < 0)
68663 goto out;
68664@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68665 le16_to_cpu(dnaddr) & 0x3ff);
68666
68667 set_fs(KERNEL_DS);
68668- result = vfs_write(file, buf, len, &pos);
68669+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
68670 set_fs(old_fs);
68671 if (result < 0)
68672 goto out;
68673diff --git a/kernel/taskstats.c b/kernel/taskstats.c
68674index e660464..c8b9e67 100644
68675--- a/kernel/taskstats.c
68676+++ b/kernel/taskstats.c
68677@@ -27,9 +27,12 @@
68678 #include <linux/cgroup.h>
68679 #include <linux/fs.h>
68680 #include <linux/file.h>
68681+#include <linux/grsecurity.h>
68682 #include <net/genetlink.h>
68683 #include <linux/atomic.h>
68684
68685+extern int gr_is_taskstats_denied(int pid);
68686+
68687 /*
68688 * Maximum length of a cpumask that can be specified in
68689 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
68690@@ -556,6 +559,9 @@ err:
68691
68692 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
68693 {
68694+ if (gr_is_taskstats_denied(current->pid))
68695+ return -EACCES;
68696+
68697 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
68698 return cmd_attr_register_cpumask(info);
68699 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
68700diff --git a/kernel/time.c b/kernel/time.c
68701index ba744cf..267b7c5 100644
68702--- a/kernel/time.c
68703+++ b/kernel/time.c
68704@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
68705 return error;
68706
68707 if (tz) {
68708+ /* we log in do_settimeofday called below, so don't log twice
68709+ */
68710+ if (!tv)
68711+ gr_log_timechange();
68712+
68713 sys_tz = *tz;
68714 update_vsyscall_tz();
68715 if (firsttime) {
68716diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
68717index 8a538c5..def79d4 100644
68718--- a/kernel/time/alarmtimer.c
68719+++ b/kernel/time/alarmtimer.c
68720@@ -779,7 +779,7 @@ static int __init alarmtimer_init(void)
68721 struct platform_device *pdev;
68722 int error = 0;
68723 int i;
68724- struct k_clock alarm_clock = {
68725+ static struct k_clock alarm_clock = {
68726 .clock_getres = alarm_clock_getres,
68727 .clock_get = alarm_clock_get,
68728 .timer_create = alarm_timer_create,
68729diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
68730index f113755..ec24223 100644
68731--- a/kernel/time/tick-broadcast.c
68732+++ b/kernel/time/tick-broadcast.c
68733@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
68734 * then clear the broadcast bit.
68735 */
68736 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
68737- int cpu = smp_processor_id();
68738+ cpu = smp_processor_id();
68739
68740 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
68741 tick_broadcast_clear_oneshot(cpu);
68742diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
68743index d42574df..247414c 100644
68744--- a/kernel/time/timekeeping.c
68745+++ b/kernel/time/timekeeping.c
68746@@ -14,6 +14,7 @@
68747 #include <linux/init.h>
68748 #include <linux/mm.h>
68749 #include <linux/sched.h>
68750+#include <linux/grsecurity.h>
68751 #include <linux/syscore_ops.h>
68752 #include <linux/clocksource.h>
68753 #include <linux/jiffies.h>
68754@@ -373,6 +374,8 @@ int do_settimeofday(const struct timespec *tv)
68755 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
68756 return -EINVAL;
68757
68758+ gr_log_timechange();
68759+
68760 write_seqlock_irqsave(&timekeeper.lock, flags);
68761
68762 timekeeping_forward_now();
68763diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
68764index 3258455..f35227d 100644
68765--- a/kernel/time/timer_list.c
68766+++ b/kernel/time/timer_list.c
68767@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
68768
68769 static void print_name_offset(struct seq_file *m, void *sym)
68770 {
68771+#ifdef CONFIG_GRKERNSEC_HIDESYM
68772+ SEQ_printf(m, "<%p>", NULL);
68773+#else
68774 char symname[KSYM_NAME_LEN];
68775
68776 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
68777 SEQ_printf(m, "<%pK>", sym);
68778 else
68779 SEQ_printf(m, "%s", symname);
68780+#endif
68781 }
68782
68783 static void
68784@@ -112,7 +116,11 @@ next_one:
68785 static void
68786 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
68787 {
68788+#ifdef CONFIG_GRKERNSEC_HIDESYM
68789+ SEQ_printf(m, " .base: %p\n", NULL);
68790+#else
68791 SEQ_printf(m, " .base: %pK\n", base);
68792+#endif
68793 SEQ_printf(m, " .index: %d\n",
68794 base->index);
68795 SEQ_printf(m, " .resolution: %Lu nsecs\n",
68796@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
68797 {
68798 struct proc_dir_entry *pe;
68799
68800+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68801+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
68802+#else
68803 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
68804+#endif
68805 if (!pe)
68806 return -ENOMEM;
68807 return 0;
68808diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
68809index 0b537f2..9e71eca 100644
68810--- a/kernel/time/timer_stats.c
68811+++ b/kernel/time/timer_stats.c
68812@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
68813 static unsigned long nr_entries;
68814 static struct entry entries[MAX_ENTRIES];
68815
68816-static atomic_t overflow_count;
68817+static atomic_unchecked_t overflow_count;
68818
68819 /*
68820 * The entries are in a hash-table, for fast lookup:
68821@@ -140,7 +140,7 @@ static void reset_entries(void)
68822 nr_entries = 0;
68823 memset(entries, 0, sizeof(entries));
68824 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
68825- atomic_set(&overflow_count, 0);
68826+ atomic_set_unchecked(&overflow_count, 0);
68827 }
68828
68829 static struct entry *alloc_entry(void)
68830@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
68831 if (likely(entry))
68832 entry->count++;
68833 else
68834- atomic_inc(&overflow_count);
68835+ atomic_inc_unchecked(&overflow_count);
68836
68837 out_unlock:
68838 raw_spin_unlock_irqrestore(lock, flags);
68839@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
68840
68841 static void print_name_offset(struct seq_file *m, unsigned long addr)
68842 {
68843+#ifdef CONFIG_GRKERNSEC_HIDESYM
68844+ seq_printf(m, "<%p>", NULL);
68845+#else
68846 char symname[KSYM_NAME_LEN];
68847
68848 if (lookup_symbol_name(addr, symname) < 0)
68849 seq_printf(m, "<%p>", (void *)addr);
68850 else
68851 seq_printf(m, "%s", symname);
68852+#endif
68853 }
68854
68855 static int tstats_show(struct seq_file *m, void *v)
68856@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
68857
68858 seq_puts(m, "Timer Stats Version: v0.2\n");
68859 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
68860- if (atomic_read(&overflow_count))
68861+ if (atomic_read_unchecked(&overflow_count))
68862 seq_printf(m, "Overflow: %d entries\n",
68863- atomic_read(&overflow_count));
68864+ atomic_read_unchecked(&overflow_count));
68865
68866 for (i = 0; i < nr_entries; i++) {
68867 entry = entries + i;
68868@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
68869 {
68870 struct proc_dir_entry *pe;
68871
68872+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68873+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
68874+#else
68875 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
68876+#endif
68877 if (!pe)
68878 return -ENOMEM;
68879 return 0;
68880diff --git a/kernel/timer.c b/kernel/timer.c
68881index a297ffc..5e16b0b 100644
68882--- a/kernel/timer.c
68883+++ b/kernel/timer.c
68884@@ -1354,7 +1354,7 @@ void update_process_times(int user_tick)
68885 /*
68886 * This function runs timers and the timer-tq in bottom half context.
68887 */
68888-static void run_timer_softirq(struct softirq_action *h)
68889+static void run_timer_softirq(void)
68890 {
68891 struct tvec_base *base = __this_cpu_read(tvec_bases);
68892
68893diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
68894index c0bd030..62a1927 100644
68895--- a/kernel/trace/blktrace.c
68896+++ b/kernel/trace/blktrace.c
68897@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
68898 struct blk_trace *bt = filp->private_data;
68899 char buf[16];
68900
68901- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
68902+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
68903
68904 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
68905 }
68906@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
68907 return 1;
68908
68909 bt = buf->chan->private_data;
68910- atomic_inc(&bt->dropped);
68911+ atomic_inc_unchecked(&bt->dropped);
68912 return 0;
68913 }
68914
68915@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
68916
68917 bt->dir = dir;
68918 bt->dev = dev;
68919- atomic_set(&bt->dropped, 0);
68920+ atomic_set_unchecked(&bt->dropped, 0);
68921
68922 ret = -EIO;
68923 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
68924diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
68925index 0fa92f6..89950b2 100644
68926--- a/kernel/trace/ftrace.c
68927+++ b/kernel/trace/ftrace.c
68928@@ -1800,12 +1800,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
68929 if (unlikely(ftrace_disabled))
68930 return 0;
68931
68932+ ret = ftrace_arch_code_modify_prepare();
68933+ FTRACE_WARN_ON(ret);
68934+ if (ret)
68935+ return 0;
68936+
68937 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
68938+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
68939 if (ret) {
68940 ftrace_bug(ret, ip);
68941- return 0;
68942 }
68943- return 1;
68944+ return ret ? 0 : 1;
68945 }
68946
68947 /*
68948@@ -2917,7 +2922,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
68949
68950 int
68951 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
68952- void *data)
68953+ void *data)
68954 {
68955 struct ftrace_func_probe *entry;
68956 struct ftrace_page *pg;
68957diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
68958index 2a22255..cdcdd06 100644
68959--- a/kernel/trace/trace.c
68960+++ b/kernel/trace/trace.c
68961@@ -4312,10 +4312,9 @@ static const struct file_operations tracing_dyn_info_fops = {
68962 };
68963 #endif
68964
68965-static struct dentry *d_tracer;
68966-
68967 struct dentry *tracing_init_dentry(void)
68968 {
68969+ static struct dentry *d_tracer;
68970 static int once;
68971
68972 if (d_tracer)
68973@@ -4335,10 +4334,9 @@ struct dentry *tracing_init_dentry(void)
68974 return d_tracer;
68975 }
68976
68977-static struct dentry *d_percpu;
68978-
68979 struct dentry *tracing_dentry_percpu(void)
68980 {
68981+ static struct dentry *d_percpu;
68982 static int once;
68983 struct dentry *d_tracer;
68984
68985diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
68986index 29111da..d190fe2 100644
68987--- a/kernel/trace/trace_events.c
68988+++ b/kernel/trace/trace_events.c
68989@@ -1308,10 +1308,6 @@ static LIST_HEAD(ftrace_module_file_list);
68990 struct ftrace_module_file_ops {
68991 struct list_head list;
68992 struct module *mod;
68993- struct file_operations id;
68994- struct file_operations enable;
68995- struct file_operations format;
68996- struct file_operations filter;
68997 };
68998
68999 static struct ftrace_module_file_ops *
69000@@ -1332,17 +1328,12 @@ trace_create_file_ops(struct module *mod)
69001
69002 file_ops->mod = mod;
69003
69004- file_ops->id = ftrace_event_id_fops;
69005- file_ops->id.owner = mod;
69006-
69007- file_ops->enable = ftrace_enable_fops;
69008- file_ops->enable.owner = mod;
69009-
69010- file_ops->filter = ftrace_event_filter_fops;
69011- file_ops->filter.owner = mod;
69012-
69013- file_ops->format = ftrace_event_format_fops;
69014- file_ops->format.owner = mod;
69015+ pax_open_kernel();
69016+ *(void **)&mod->trace_id.owner = mod;
69017+ *(void **)&mod->trace_enable.owner = mod;
69018+ *(void **)&mod->trace_filter.owner = mod;
69019+ *(void **)&mod->trace_format.owner = mod;
69020+ pax_close_kernel();
69021
69022 list_add(&file_ops->list, &ftrace_module_file_list);
69023
69024@@ -1366,8 +1357,8 @@ static void trace_module_add_events(struct module *mod)
69025
69026 for_each_event(call, start, end) {
69027 __trace_add_event_call(*call, mod,
69028- &file_ops->id, &file_ops->enable,
69029- &file_ops->filter, &file_ops->format);
69030+ &mod->trace_id, &mod->trace_enable,
69031+ &mod->trace_filter, &mod->trace_format);
69032 }
69033 }
69034
69035diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
69036index 580a05e..9b31acb 100644
69037--- a/kernel/trace/trace_kprobe.c
69038+++ b/kernel/trace/trace_kprobe.c
69039@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69040 long ret;
69041 int maxlen = get_rloc_len(*(u32 *)dest);
69042 u8 *dst = get_rloc_data(dest);
69043- u8 *src = addr;
69044+ const u8 __user *src = (const u8 __force_user *)addr;
69045 mm_segment_t old_fs = get_fs();
69046 if (!maxlen)
69047 return;
69048@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69049 pagefault_disable();
69050 do
69051 ret = __copy_from_user_inatomic(dst++, src++, 1);
69052- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
69053+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
69054 dst[-1] = '\0';
69055 pagefault_enable();
69056 set_fs(old_fs);
69057@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69058 ((u8 *)get_rloc_data(dest))[0] = '\0';
69059 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
69060 } else
69061- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
69062+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
69063 get_rloc_offs(*(u32 *)dest));
69064 }
69065 /* Return the length of string -- including null terminal byte */
69066@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
69067 set_fs(KERNEL_DS);
69068 pagefault_disable();
69069 do {
69070- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
69071+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
69072 len++;
69073 } while (c && ret == 0 && len < MAX_STRING_SIZE);
69074 pagefault_enable();
69075diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
69076index fd3c8aa..5f324a6 100644
69077--- a/kernel/trace/trace_mmiotrace.c
69078+++ b/kernel/trace/trace_mmiotrace.c
69079@@ -24,7 +24,7 @@ struct header_iter {
69080 static struct trace_array *mmio_trace_array;
69081 static bool overrun_detected;
69082 static unsigned long prev_overruns;
69083-static atomic_t dropped_count;
69084+static atomic_unchecked_t dropped_count;
69085
69086 static void mmio_reset_data(struct trace_array *tr)
69087 {
69088@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
69089
69090 static unsigned long count_overruns(struct trace_iterator *iter)
69091 {
69092- unsigned long cnt = atomic_xchg(&dropped_count, 0);
69093+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
69094 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
69095
69096 if (over > prev_overruns)
69097@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
69098 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
69099 sizeof(*entry), 0, pc);
69100 if (!event) {
69101- atomic_inc(&dropped_count);
69102+ atomic_inc_unchecked(&dropped_count);
69103 return;
69104 }
69105 entry = ring_buffer_event_data(event);
69106@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
69107 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
69108 sizeof(*entry), 0, pc);
69109 if (!event) {
69110- atomic_inc(&dropped_count);
69111+ atomic_inc_unchecked(&dropped_count);
69112 return;
69113 }
69114 entry = ring_buffer_event_data(event);
69115diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
69116index df611a0..10d8b32 100644
69117--- a/kernel/trace/trace_output.c
69118+++ b/kernel/trace/trace_output.c
69119@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
69120
69121 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
69122 if (!IS_ERR(p)) {
69123- p = mangle_path(s->buffer + s->len, p, "\n");
69124+ p = mangle_path(s->buffer + s->len, p, "\n\\");
69125 if (p) {
69126 s->len = p - s->buffer;
69127 return 1;
69128diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
69129index d4545f4..a9010a1 100644
69130--- a/kernel/trace/trace_stack.c
69131+++ b/kernel/trace/trace_stack.c
69132@@ -53,7 +53,7 @@ static inline void check_stack(void)
69133 return;
69134
69135 /* we do not handle interrupt stacks yet */
69136- if (!object_is_on_stack(&this_size))
69137+ if (!object_starts_on_stack(&this_size))
69138 return;
69139
69140 local_irq_save(flags);
69141diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
69142index 209b379..7f76423 100644
69143--- a/kernel/trace/trace_workqueue.c
69144+++ b/kernel/trace/trace_workqueue.c
69145@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
69146 int cpu;
69147 pid_t pid;
69148 /* Can be inserted from interrupt or user context, need to be atomic */
69149- atomic_t inserted;
69150+ atomic_unchecked_t inserted;
69151 /*
69152 * Don't need to be atomic, works are serialized in a single workqueue thread
69153 * on a single CPU.
69154@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
69155 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
69156 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
69157 if (node->pid == wq_thread->pid) {
69158- atomic_inc(&node->inserted);
69159+ atomic_inc_unchecked(&node->inserted);
69160 goto found;
69161 }
69162 }
69163@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
69164 tsk = get_pid_task(pid, PIDTYPE_PID);
69165 if (tsk) {
69166 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
69167- atomic_read(&cws->inserted), cws->executed,
69168+ atomic_read_unchecked(&cws->inserted), cws->executed,
69169 tsk->comm);
69170 put_task_struct(tsk);
69171 }
69172diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
69173index 6777153..8519f60 100644
69174--- a/lib/Kconfig.debug
69175+++ b/lib/Kconfig.debug
69176@@ -1132,6 +1132,7 @@ config LATENCYTOP
69177 depends on DEBUG_KERNEL
69178 depends on STACKTRACE_SUPPORT
69179 depends on PROC_FS
69180+ depends on !GRKERNSEC_HIDESYM
69181 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
69182 select KALLSYMS
69183 select KALLSYMS_ALL
69184diff --git a/lib/bitmap.c b/lib/bitmap.c
69185index b5a8b6a..a69623c 100644
69186--- a/lib/bitmap.c
69187+++ b/lib/bitmap.c
69188@@ -421,7 +421,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
69189 {
69190 int c, old_c, totaldigits, ndigits, nchunks, nbits;
69191 u32 chunk;
69192- const char __user __force *ubuf = (const char __user __force *)buf;
69193+ const char __user *ubuf = (const char __force_user *)buf;
69194
69195 bitmap_zero(maskp, nmaskbits);
69196
69197@@ -506,7 +506,7 @@ int bitmap_parse_user(const char __user *ubuf,
69198 {
69199 if (!access_ok(VERIFY_READ, ubuf, ulen))
69200 return -EFAULT;
69201- return __bitmap_parse((const char __force *)ubuf,
69202+ return __bitmap_parse((const char __force_kernel *)ubuf,
69203 ulen, 1, maskp, nmaskbits);
69204
69205 }
69206@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
69207 {
69208 unsigned a, b;
69209 int c, old_c, totaldigits;
69210- const char __user __force *ubuf = (const char __user __force *)buf;
69211+ const char __user *ubuf = (const char __force_user *)buf;
69212 int exp_digit, in_range;
69213
69214 totaldigits = c = 0;
69215@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
69216 {
69217 if (!access_ok(VERIFY_READ, ubuf, ulen))
69218 return -EFAULT;
69219- return __bitmap_parselist((const char __force *)ubuf,
69220+ return __bitmap_parselist((const char __force_kernel *)ubuf,
69221 ulen, 1, maskp, nmaskbits);
69222 }
69223 EXPORT_SYMBOL(bitmap_parselist_user);
69224diff --git a/lib/bug.c b/lib/bug.c
69225index a28c141..2bd3d95 100644
69226--- a/lib/bug.c
69227+++ b/lib/bug.c
69228@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
69229 return BUG_TRAP_TYPE_NONE;
69230
69231 bug = find_bug(bugaddr);
69232+ if (!bug)
69233+ return BUG_TRAP_TYPE_NONE;
69234
69235 file = NULL;
69236 line = 0;
69237diff --git a/lib/debugobjects.c b/lib/debugobjects.c
69238index 0ab9ae8..f01ceca 100644
69239--- a/lib/debugobjects.c
69240+++ b/lib/debugobjects.c
69241@@ -288,7 +288,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
69242 if (limit > 4)
69243 return;
69244
69245- is_on_stack = object_is_on_stack(addr);
69246+ is_on_stack = object_starts_on_stack(addr);
69247 if (is_on_stack == onstack)
69248 return;
69249
69250diff --git a/lib/devres.c b/lib/devres.c
69251index 80b9c76..9e32279 100644
69252--- a/lib/devres.c
69253+++ b/lib/devres.c
69254@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
69255 void devm_iounmap(struct device *dev, void __iomem *addr)
69256 {
69257 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
69258- (void *)addr));
69259+ (void __force *)addr));
69260 iounmap(addr);
69261 }
69262 EXPORT_SYMBOL(devm_iounmap);
69263@@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
69264 {
69265 ioport_unmap(addr);
69266 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
69267- devm_ioport_map_match, (void *)addr));
69268+ devm_ioport_map_match, (void __force *)addr));
69269 }
69270 EXPORT_SYMBOL(devm_ioport_unmap);
69271
69272diff --git a/lib/dma-debug.c b/lib/dma-debug.c
69273index 13ef233..5241683 100644
69274--- a/lib/dma-debug.c
69275+++ b/lib/dma-debug.c
69276@@ -924,7 +924,7 @@ out:
69277
69278 static void check_for_stack(struct device *dev, void *addr)
69279 {
69280- if (object_is_on_stack(addr))
69281+ if (object_starts_on_stack(addr))
69282 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
69283 "stack [addr=%p]\n", addr);
69284 }
69285diff --git a/lib/extable.c b/lib/extable.c
69286index 4cac81e..63e9b8f 100644
69287--- a/lib/extable.c
69288+++ b/lib/extable.c
69289@@ -13,6 +13,7 @@
69290 #include <linux/init.h>
69291 #include <linux/sort.h>
69292 #include <asm/uaccess.h>
69293+#include <asm/pgtable.h>
69294
69295 #ifndef ARCH_HAS_SORT_EXTABLE
69296 /*
69297@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
69298 void sort_extable(struct exception_table_entry *start,
69299 struct exception_table_entry *finish)
69300 {
69301+ pax_open_kernel();
69302 sort(start, finish - start, sizeof(struct exception_table_entry),
69303 cmp_ex, NULL);
69304+ pax_close_kernel();
69305 }
69306
69307 #ifdef CONFIG_MODULES
69308diff --git a/lib/inflate.c b/lib/inflate.c
69309index 013a761..c28f3fc 100644
69310--- a/lib/inflate.c
69311+++ b/lib/inflate.c
69312@@ -269,7 +269,7 @@ static void free(void *where)
69313 malloc_ptr = free_mem_ptr;
69314 }
69315 #else
69316-#define malloc(a) kmalloc(a, GFP_KERNEL)
69317+#define malloc(a) kmalloc((a), GFP_KERNEL)
69318 #define free(a) kfree(a)
69319 #endif
69320
69321diff --git a/lib/ioremap.c b/lib/ioremap.c
69322index 0c9216c..863bd89 100644
69323--- a/lib/ioremap.c
69324+++ b/lib/ioremap.c
69325@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
69326 unsigned long next;
69327
69328 phys_addr -= addr;
69329- pmd = pmd_alloc(&init_mm, pud, addr);
69330+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
69331 if (!pmd)
69332 return -ENOMEM;
69333 do {
69334@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
69335 unsigned long next;
69336
69337 phys_addr -= addr;
69338- pud = pud_alloc(&init_mm, pgd, addr);
69339+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
69340 if (!pud)
69341 return -ENOMEM;
69342 do {
69343diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
69344index bd2bea9..6b3c95e 100644
69345--- a/lib/is_single_threaded.c
69346+++ b/lib/is_single_threaded.c
69347@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
69348 struct task_struct *p, *t;
69349 bool ret;
69350
69351+ if (!mm)
69352+ return true;
69353+
69354 if (atomic_read(&task->signal->live) != 1)
69355 return false;
69356
69357diff --git a/lib/radix-tree.c b/lib/radix-tree.c
69358index 3ac50dc..240bb7e 100644
69359--- a/lib/radix-tree.c
69360+++ b/lib/radix-tree.c
69361@@ -79,7 +79,7 @@ struct radix_tree_preload {
69362 int nr;
69363 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
69364 };
69365-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
69366+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
69367
69368 static inline void *ptr_to_indirect(void *ptr)
69369 {
69370diff --git a/lib/vsprintf.c b/lib/vsprintf.c
69371index abbabec..362988d 100644
69372--- a/lib/vsprintf.c
69373+++ b/lib/vsprintf.c
69374@@ -16,6 +16,9 @@
69375 * - scnprintf and vscnprintf
69376 */
69377
69378+#ifdef CONFIG_GRKERNSEC_HIDESYM
69379+#define __INCLUDED_BY_HIDESYM 1
69380+#endif
69381 #include <stdarg.h>
69382 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
69383 #include <linux/types.h>
69384@@ -433,7 +436,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
69385 char sym[KSYM_SYMBOL_LEN];
69386 if (ext == 'B')
69387 sprint_backtrace(sym, value);
69388- else if (ext != 'f' && ext != 's')
69389+ else if (ext != 'f' && ext != 's' && ext != 'a')
69390 sprint_symbol(sym, value);
69391 else
69392 kallsyms_lookup(value, NULL, NULL, NULL, sym);
69393@@ -809,7 +812,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
69394 return number(buf, end, *(const netdev_features_t *)addr, spec);
69395 }
69396
69397+#ifdef CONFIG_GRKERNSEC_HIDESYM
69398+int kptr_restrict __read_mostly = 2;
69399+#else
69400 int kptr_restrict __read_mostly;
69401+#endif
69402
69403 /*
69404 * Show a '%p' thing. A kernel extension is that the '%p' is followed
69405@@ -823,6 +830,8 @@ int kptr_restrict __read_mostly;
69406 * - 'S' For symbolic direct pointers with offset
69407 * - 's' For symbolic direct pointers without offset
69408 * - 'B' For backtraced symbolic direct pointers with offset
69409+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
69410+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
69411 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
69412 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
69413 * - 'M' For a 6-byte MAC address, it prints the address in the
69414@@ -868,12 +877,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69415 {
69416 if (!ptr && *fmt != 'K') {
69417 /*
69418- * Print (null) with the same width as a pointer so it makes
69419+ * Print (nil) with the same width as a pointer so it makes
69420 * tabular output look nice.
69421 */
69422 if (spec.field_width == -1)
69423 spec.field_width = 2 * sizeof(void *);
69424- return string(buf, end, "(null)", spec);
69425+ return string(buf, end, "(nil)", spec);
69426 }
69427
69428 switch (*fmt) {
69429@@ -883,6 +892,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69430 /* Fallthrough */
69431 case 'S':
69432 case 's':
69433+#ifdef CONFIG_GRKERNSEC_HIDESYM
69434+ break;
69435+#else
69436+ return symbol_string(buf, end, ptr, spec, *fmt);
69437+#endif
69438+ case 'A':
69439+ case 'a':
69440 case 'B':
69441 return symbol_string(buf, end, ptr, spec, *fmt);
69442 case 'R':
69443@@ -1653,11 +1669,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69444 typeof(type) value; \
69445 if (sizeof(type) == 8) { \
69446 args = PTR_ALIGN(args, sizeof(u32)); \
69447- *(u32 *)&value = *(u32 *)args; \
69448- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
69449+ *(u32 *)&value = *(const u32 *)args; \
69450+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
69451 } else { \
69452 args = PTR_ALIGN(args, sizeof(type)); \
69453- value = *(typeof(type) *)args; \
69454+ value = *(const typeof(type) *)args; \
69455 } \
69456 args += sizeof(type); \
69457 value; \
69458@@ -1720,7 +1736,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69459 case FORMAT_TYPE_STR: {
69460 const char *str_arg = args;
69461 args += strlen(str_arg) + 1;
69462- str = string(str, end, (char *)str_arg, spec);
69463+ str = string(str, end, str_arg, spec);
69464 break;
69465 }
69466
69467diff --git a/localversion-grsec b/localversion-grsec
69468new file mode 100644
69469index 0000000..7cd6065
69470--- /dev/null
69471+++ b/localversion-grsec
69472@@ -0,0 +1 @@
69473+-grsec
69474diff --git a/mm/Kconfig b/mm/Kconfig
69475index e338407..4210331 100644
69476--- a/mm/Kconfig
69477+++ b/mm/Kconfig
69478@@ -247,10 +247,10 @@ config KSM
69479 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
69480
69481 config DEFAULT_MMAP_MIN_ADDR
69482- int "Low address space to protect from user allocation"
69483+ int "Low address space to protect from user allocation"
69484 depends on MMU
69485- default 4096
69486- help
69487+ default 65536
69488+ help
69489 This is the portion of low virtual memory which should be protected
69490 from userspace allocation. Keeping a user from writing to low pages
69491 can help reduce the impact of kernel NULL pointer bugs.
69492@@ -280,7 +280,7 @@ config MEMORY_FAILURE
69493
69494 config HWPOISON_INJECT
69495 tristate "HWPoison pages injector"
69496- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
69497+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
69498 select PROC_PAGE_MONITOR
69499
69500 config NOMMU_INITIAL_TRIM_EXCESS
69501diff --git a/mm/filemap.c b/mm/filemap.c
69502index 79c4b2b..596b417 100644
69503--- a/mm/filemap.c
69504+++ b/mm/filemap.c
69505@@ -1762,7 +1762,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
69506 struct address_space *mapping = file->f_mapping;
69507
69508 if (!mapping->a_ops->readpage)
69509- return -ENOEXEC;
69510+ return -ENODEV;
69511 file_accessed(file);
69512 vma->vm_ops = &generic_file_vm_ops;
69513 vma->vm_flags |= VM_CAN_NONLINEAR;
69514@@ -2168,6 +2168,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
69515 *pos = i_size_read(inode);
69516
69517 if (limit != RLIM_INFINITY) {
69518+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
69519 if (*pos >= limit) {
69520 send_sig(SIGXFSZ, current, 0);
69521 return -EFBIG;
69522diff --git a/mm/fremap.c b/mm/fremap.c
69523index 9ed4fd4..c42648d 100644
69524--- a/mm/fremap.c
69525+++ b/mm/fremap.c
69526@@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
69527 retry:
69528 vma = find_vma(mm, start);
69529
69530+#ifdef CONFIG_PAX_SEGMEXEC
69531+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
69532+ goto out;
69533+#endif
69534+
69535 /*
69536 * Make sure the vma is shared, that it supports prefaulting,
69537 * and that the remapped range is valid and fully within
69538diff --git a/mm/highmem.c b/mm/highmem.c
69539index 57d82c6..e9e0552 100644
69540--- a/mm/highmem.c
69541+++ b/mm/highmem.c
69542@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
69543 * So no dangers, even with speculative execution.
69544 */
69545 page = pte_page(pkmap_page_table[i]);
69546+ pax_open_kernel();
69547 pte_clear(&init_mm, (unsigned long)page_address(page),
69548 &pkmap_page_table[i]);
69549-
69550+ pax_close_kernel();
69551 set_page_address(page, NULL);
69552 need_flush = 1;
69553 }
69554@@ -186,9 +187,11 @@ start:
69555 }
69556 }
69557 vaddr = PKMAP_ADDR(last_pkmap_nr);
69558+
69559+ pax_open_kernel();
69560 set_pte_at(&init_mm, vaddr,
69561 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
69562-
69563+ pax_close_kernel();
69564 pkmap_count[last_pkmap_nr] = 1;
69565 set_page_address(page, (void *)vaddr);
69566
69567diff --git a/mm/huge_memory.c b/mm/huge_memory.c
69568index f0e5306..cb9398e 100644
69569--- a/mm/huge_memory.c
69570+++ b/mm/huge_memory.c
69571@@ -733,7 +733,7 @@ out:
69572 * run pte_offset_map on the pmd, if an huge pmd could
69573 * materialize from under us from a different thread.
69574 */
69575- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
69576+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
69577 return VM_FAULT_OOM;
69578 /* if an huge pmd materialized from under us just retry later */
69579 if (unlikely(pmd_trans_huge(*pmd)))
69580diff --git a/mm/hugetlb.c b/mm/hugetlb.c
69581index 263e177..3f36aec 100644
69582--- a/mm/hugetlb.c
69583+++ b/mm/hugetlb.c
69584@@ -2446,6 +2446,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
69585 return 1;
69586 }
69587
69588+#ifdef CONFIG_PAX_SEGMEXEC
69589+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
69590+{
69591+ struct mm_struct *mm = vma->vm_mm;
69592+ struct vm_area_struct *vma_m;
69593+ unsigned long address_m;
69594+ pte_t *ptep_m;
69595+
69596+ vma_m = pax_find_mirror_vma(vma);
69597+ if (!vma_m)
69598+ return;
69599+
69600+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69601+ address_m = address + SEGMEXEC_TASK_SIZE;
69602+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
69603+ get_page(page_m);
69604+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
69605+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
69606+}
69607+#endif
69608+
69609 /*
69610 * Hugetlb_cow() should be called with page lock of the original hugepage held.
69611 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
69612@@ -2558,6 +2579,11 @@ retry_avoidcopy:
69613 make_huge_pte(vma, new_page, 1));
69614 page_remove_rmap(old_page);
69615 hugepage_add_new_anon_rmap(new_page, vma, address);
69616+
69617+#ifdef CONFIG_PAX_SEGMEXEC
69618+ pax_mirror_huge_pte(vma, address, new_page);
69619+#endif
69620+
69621 /* Make the old page be freed below */
69622 new_page = old_page;
69623 mmu_notifier_invalidate_range_end(mm,
69624@@ -2712,6 +2738,10 @@ retry:
69625 && (vma->vm_flags & VM_SHARED)));
69626 set_huge_pte_at(mm, address, ptep, new_pte);
69627
69628+#ifdef CONFIG_PAX_SEGMEXEC
69629+ pax_mirror_huge_pte(vma, address, page);
69630+#endif
69631+
69632 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
69633 /* Optimization, do the COW without a second fault */
69634 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
69635@@ -2741,6 +2771,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69636 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
69637 struct hstate *h = hstate_vma(vma);
69638
69639+#ifdef CONFIG_PAX_SEGMEXEC
69640+ struct vm_area_struct *vma_m;
69641+#endif
69642+
69643 address &= huge_page_mask(h);
69644
69645 ptep = huge_pte_offset(mm, address);
69646@@ -2754,6 +2788,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69647 VM_FAULT_SET_HINDEX(h - hstates);
69648 }
69649
69650+#ifdef CONFIG_PAX_SEGMEXEC
69651+ vma_m = pax_find_mirror_vma(vma);
69652+ if (vma_m) {
69653+ unsigned long address_m;
69654+
69655+ if (vma->vm_start > vma_m->vm_start) {
69656+ address_m = address;
69657+ address -= SEGMEXEC_TASK_SIZE;
69658+ vma = vma_m;
69659+ h = hstate_vma(vma);
69660+ } else
69661+ address_m = address + SEGMEXEC_TASK_SIZE;
69662+
69663+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
69664+ return VM_FAULT_OOM;
69665+ address_m &= HPAGE_MASK;
69666+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
69667+ }
69668+#endif
69669+
69670 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
69671 if (!ptep)
69672 return VM_FAULT_OOM;
69673diff --git a/mm/internal.h b/mm/internal.h
69674index 2189af4..f2ca332 100644
69675--- a/mm/internal.h
69676+++ b/mm/internal.h
69677@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
69678 * in mm/page_alloc.c
69679 */
69680 extern void __free_pages_bootmem(struct page *page, unsigned int order);
69681+extern void free_compound_page(struct page *page);
69682 extern void prep_compound_page(struct page *page, unsigned long order);
69683 #ifdef CONFIG_MEMORY_FAILURE
69684 extern bool is_free_buddy_page(struct page *page);
69685diff --git a/mm/kmemleak.c b/mm/kmemleak.c
69686index 45eb621..6ccd8ea 100644
69687--- a/mm/kmemleak.c
69688+++ b/mm/kmemleak.c
69689@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
69690
69691 for (i = 0; i < object->trace_len; i++) {
69692 void *ptr = (void *)object->trace[i];
69693- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
69694+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
69695 }
69696 }
69697
69698diff --git a/mm/maccess.c b/mm/maccess.c
69699index d53adf9..03a24bf 100644
69700--- a/mm/maccess.c
69701+++ b/mm/maccess.c
69702@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
69703 set_fs(KERNEL_DS);
69704 pagefault_disable();
69705 ret = __copy_from_user_inatomic(dst,
69706- (__force const void __user *)src, size);
69707+ (const void __force_user *)src, size);
69708 pagefault_enable();
69709 set_fs(old_fs);
69710
69711@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
69712
69713 set_fs(KERNEL_DS);
69714 pagefault_disable();
69715- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
69716+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
69717 pagefault_enable();
69718 set_fs(old_fs);
69719
69720diff --git a/mm/madvise.c b/mm/madvise.c
69721index 1ccbba5..79e16f9 100644
69722--- a/mm/madvise.c
69723+++ b/mm/madvise.c
69724@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
69725 pgoff_t pgoff;
69726 unsigned long new_flags = vma->vm_flags;
69727
69728+#ifdef CONFIG_PAX_SEGMEXEC
69729+ struct vm_area_struct *vma_m;
69730+#endif
69731+
69732 switch (behavior) {
69733 case MADV_NORMAL:
69734 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
69735@@ -116,6 +120,13 @@ success:
69736 /*
69737 * vm_flags is protected by the mmap_sem held in write mode.
69738 */
69739+
69740+#ifdef CONFIG_PAX_SEGMEXEC
69741+ vma_m = pax_find_mirror_vma(vma);
69742+ if (vma_m)
69743+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
69744+#endif
69745+
69746 vma->vm_flags = new_flags;
69747
69748 out:
69749@@ -174,6 +185,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
69750 struct vm_area_struct ** prev,
69751 unsigned long start, unsigned long end)
69752 {
69753+
69754+#ifdef CONFIG_PAX_SEGMEXEC
69755+ struct vm_area_struct *vma_m;
69756+#endif
69757+
69758 *prev = vma;
69759 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
69760 return -EINVAL;
69761@@ -186,6 +202,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
69762 zap_page_range(vma, start, end - start, &details);
69763 } else
69764 zap_page_range(vma, start, end - start, NULL);
69765+
69766+#ifdef CONFIG_PAX_SEGMEXEC
69767+ vma_m = pax_find_mirror_vma(vma);
69768+ if (vma_m) {
69769+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
69770+ struct zap_details details = {
69771+ .nonlinear_vma = vma_m,
69772+ .last_index = ULONG_MAX,
69773+ };
69774+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
69775+ } else
69776+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
69777+ }
69778+#endif
69779+
69780 return 0;
69781 }
69782
69783@@ -384,6 +415,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
69784 if (end < start)
69785 goto out;
69786
69787+#ifdef CONFIG_PAX_SEGMEXEC
69788+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69789+ if (end > SEGMEXEC_TASK_SIZE)
69790+ goto out;
69791+ } else
69792+#endif
69793+
69794+ if (end > TASK_SIZE)
69795+ goto out;
69796+
69797 error = 0;
69798 if (end == start)
69799 goto out;
69800diff --git a/mm/memory-failure.c b/mm/memory-failure.c
69801index 97cc273..6ed703f 100644
69802--- a/mm/memory-failure.c
69803+++ b/mm/memory-failure.c
69804@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
69805
69806 int sysctl_memory_failure_recovery __read_mostly = 1;
69807
69808-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
69809+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
69810
69811 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
69812
69813@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
69814 pfn, t->comm, t->pid);
69815 si.si_signo = SIGBUS;
69816 si.si_errno = 0;
69817- si.si_addr = (void *)addr;
69818+ si.si_addr = (void __user *)addr;
69819 #ifdef __ARCH_SI_TRAPNO
69820 si.si_trapno = trapno;
69821 #endif
69822@@ -1036,7 +1036,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
69823 }
69824
69825 nr_pages = 1 << compound_trans_order(hpage);
69826- atomic_long_add(nr_pages, &mce_bad_pages);
69827+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
69828
69829 /*
69830 * We need/can do nothing about count=0 pages.
69831@@ -1066,7 +1066,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
69832 if (!PageHWPoison(hpage)
69833 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
69834 || (p != hpage && TestSetPageHWPoison(hpage))) {
69835- atomic_long_sub(nr_pages, &mce_bad_pages);
69836+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69837 return 0;
69838 }
69839 set_page_hwpoison_huge_page(hpage);
69840@@ -1124,7 +1124,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
69841 }
69842 if (hwpoison_filter(p)) {
69843 if (TestClearPageHWPoison(p))
69844- atomic_long_sub(nr_pages, &mce_bad_pages);
69845+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69846 unlock_page(hpage);
69847 put_page(hpage);
69848 return 0;
69849@@ -1319,7 +1319,7 @@ int unpoison_memory(unsigned long pfn)
69850 return 0;
69851 }
69852 if (TestClearPageHWPoison(p))
69853- atomic_long_sub(nr_pages, &mce_bad_pages);
69854+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69855 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
69856 return 0;
69857 }
69858@@ -1333,7 +1333,7 @@ int unpoison_memory(unsigned long pfn)
69859 */
69860 if (TestClearPageHWPoison(page)) {
69861 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
69862- atomic_long_sub(nr_pages, &mce_bad_pages);
69863+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69864 freeit = 1;
69865 if (PageHuge(page))
69866 clear_page_hwpoison_huge_page(page);
69867@@ -1446,7 +1446,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
69868 }
69869 done:
69870 if (!PageHWPoison(hpage))
69871- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
69872+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
69873 set_page_hwpoison_huge_page(hpage);
69874 dequeue_hwpoisoned_huge_page(hpage);
69875 /* keep elevated page count for bad page */
69876@@ -1577,7 +1577,7 @@ int soft_offline_page(struct page *page, int flags)
69877 return ret;
69878
69879 done:
69880- atomic_long_add(1, &mce_bad_pages);
69881+ atomic_long_add_unchecked(1, &mce_bad_pages);
69882 SetPageHWPoison(page);
69883 /* keep elevated page count for bad page */
69884 return ret;
69885diff --git a/mm/memory.c b/mm/memory.c
69886index 6105f47..3363489 100644
69887--- a/mm/memory.c
69888+++ b/mm/memory.c
69889@@ -434,8 +434,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
69890 return;
69891
69892 pmd = pmd_offset(pud, start);
69893+
69894+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
69895 pud_clear(pud);
69896 pmd_free_tlb(tlb, pmd, start);
69897+#endif
69898+
69899 }
69900
69901 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
69902@@ -466,9 +470,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
69903 if (end - 1 > ceiling - 1)
69904 return;
69905
69906+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
69907 pud = pud_offset(pgd, start);
69908 pgd_clear(pgd);
69909 pud_free_tlb(tlb, pud, start);
69910+#endif
69911+
69912 }
69913
69914 /*
69915@@ -1597,12 +1604,6 @@ no_page_table:
69916 return page;
69917 }
69918
69919-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
69920-{
69921- return stack_guard_page_start(vma, addr) ||
69922- stack_guard_page_end(vma, addr+PAGE_SIZE);
69923-}
69924-
69925 /**
69926 * __get_user_pages() - pin user pages in memory
69927 * @tsk: task_struct of target task
69928@@ -1675,10 +1676,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69929 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
69930 i = 0;
69931
69932- do {
69933+ while (nr_pages) {
69934 struct vm_area_struct *vma;
69935
69936- vma = find_extend_vma(mm, start);
69937+ vma = find_vma(mm, start);
69938 if (!vma && in_gate_area(mm, start)) {
69939 unsigned long pg = start & PAGE_MASK;
69940 pgd_t *pgd;
69941@@ -1726,7 +1727,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69942 goto next_page;
69943 }
69944
69945- if (!vma ||
69946+ if (!vma || start < vma->vm_start ||
69947 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
69948 !(vm_flags & vma->vm_flags))
69949 return i ? : -EFAULT;
69950@@ -1753,11 +1754,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69951 int ret;
69952 unsigned int fault_flags = 0;
69953
69954- /* For mlock, just skip the stack guard page. */
69955- if (foll_flags & FOLL_MLOCK) {
69956- if (stack_guard_page(vma, start))
69957- goto next_page;
69958- }
69959 if (foll_flags & FOLL_WRITE)
69960 fault_flags |= FAULT_FLAG_WRITE;
69961 if (nonblocking)
69962@@ -1831,7 +1827,7 @@ next_page:
69963 start += PAGE_SIZE;
69964 nr_pages--;
69965 } while (nr_pages && start < vma->vm_end);
69966- } while (nr_pages);
69967+ }
69968 return i;
69969 }
69970 EXPORT_SYMBOL(__get_user_pages);
69971@@ -2038,6 +2034,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
69972 page_add_file_rmap(page);
69973 set_pte_at(mm, addr, pte, mk_pte(page, prot));
69974
69975+#ifdef CONFIG_PAX_SEGMEXEC
69976+ pax_mirror_file_pte(vma, addr, page, ptl);
69977+#endif
69978+
69979 retval = 0;
69980 pte_unmap_unlock(pte, ptl);
69981 return retval;
69982@@ -2072,10 +2072,22 @@ out:
69983 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
69984 struct page *page)
69985 {
69986+
69987+#ifdef CONFIG_PAX_SEGMEXEC
69988+ struct vm_area_struct *vma_m;
69989+#endif
69990+
69991 if (addr < vma->vm_start || addr >= vma->vm_end)
69992 return -EFAULT;
69993 if (!page_count(page))
69994 return -EINVAL;
69995+
69996+#ifdef CONFIG_PAX_SEGMEXEC
69997+ vma_m = pax_find_mirror_vma(vma);
69998+ if (vma_m)
69999+ vma_m->vm_flags |= VM_INSERTPAGE;
70000+#endif
70001+
70002 vma->vm_flags |= VM_INSERTPAGE;
70003 return insert_page(vma, addr, page, vma->vm_page_prot);
70004 }
70005@@ -2161,6 +2173,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
70006 unsigned long pfn)
70007 {
70008 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
70009+ BUG_ON(vma->vm_mirror);
70010
70011 if (addr < vma->vm_start || addr >= vma->vm_end)
70012 return -EFAULT;
70013@@ -2368,7 +2381,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
70014
70015 BUG_ON(pud_huge(*pud));
70016
70017- pmd = pmd_alloc(mm, pud, addr);
70018+ pmd = (mm == &init_mm) ?
70019+ pmd_alloc_kernel(mm, pud, addr) :
70020+ pmd_alloc(mm, pud, addr);
70021 if (!pmd)
70022 return -ENOMEM;
70023 do {
70024@@ -2388,7 +2403,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
70025 unsigned long next;
70026 int err;
70027
70028- pud = pud_alloc(mm, pgd, addr);
70029+ pud = (mm == &init_mm) ?
70030+ pud_alloc_kernel(mm, pgd, addr) :
70031+ pud_alloc(mm, pgd, addr);
70032 if (!pud)
70033 return -ENOMEM;
70034 do {
70035@@ -2476,6 +2493,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
70036 copy_user_highpage(dst, src, va, vma);
70037 }
70038
70039+#ifdef CONFIG_PAX_SEGMEXEC
70040+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
70041+{
70042+ struct mm_struct *mm = vma->vm_mm;
70043+ spinlock_t *ptl;
70044+ pte_t *pte, entry;
70045+
70046+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
70047+ entry = *pte;
70048+ if (!pte_present(entry)) {
70049+ if (!pte_none(entry)) {
70050+ BUG_ON(pte_file(entry));
70051+ free_swap_and_cache(pte_to_swp_entry(entry));
70052+ pte_clear_not_present_full(mm, address, pte, 0);
70053+ }
70054+ } else {
70055+ struct page *page;
70056+
70057+ flush_cache_page(vma, address, pte_pfn(entry));
70058+ entry = ptep_clear_flush(vma, address, pte);
70059+ BUG_ON(pte_dirty(entry));
70060+ page = vm_normal_page(vma, address, entry);
70061+ if (page) {
70062+ update_hiwater_rss(mm);
70063+ if (PageAnon(page))
70064+ dec_mm_counter_fast(mm, MM_ANONPAGES);
70065+ else
70066+ dec_mm_counter_fast(mm, MM_FILEPAGES);
70067+ page_remove_rmap(page);
70068+ page_cache_release(page);
70069+ }
70070+ }
70071+ pte_unmap_unlock(pte, ptl);
70072+}
70073+
70074+/* PaX: if vma is mirrored, synchronize the mirror's PTE
70075+ *
70076+ * the ptl of the lower mapped page is held on entry and is not released on exit
70077+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
70078+ */
70079+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70080+{
70081+ struct mm_struct *mm = vma->vm_mm;
70082+ unsigned long address_m;
70083+ spinlock_t *ptl_m;
70084+ struct vm_area_struct *vma_m;
70085+ pmd_t *pmd_m;
70086+ pte_t *pte_m, entry_m;
70087+
70088+ BUG_ON(!page_m || !PageAnon(page_m));
70089+
70090+ vma_m = pax_find_mirror_vma(vma);
70091+ if (!vma_m)
70092+ return;
70093+
70094+ BUG_ON(!PageLocked(page_m));
70095+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70096+ address_m = address + SEGMEXEC_TASK_SIZE;
70097+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70098+ pte_m = pte_offset_map(pmd_m, address_m);
70099+ ptl_m = pte_lockptr(mm, pmd_m);
70100+ if (ptl != ptl_m) {
70101+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70102+ if (!pte_none(*pte_m))
70103+ goto out;
70104+ }
70105+
70106+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70107+ page_cache_get(page_m);
70108+ page_add_anon_rmap(page_m, vma_m, address_m);
70109+ inc_mm_counter_fast(mm, MM_ANONPAGES);
70110+ set_pte_at(mm, address_m, pte_m, entry_m);
70111+ update_mmu_cache(vma_m, address_m, entry_m);
70112+out:
70113+ if (ptl != ptl_m)
70114+ spin_unlock(ptl_m);
70115+ pte_unmap(pte_m);
70116+ unlock_page(page_m);
70117+}
70118+
70119+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70120+{
70121+ struct mm_struct *mm = vma->vm_mm;
70122+ unsigned long address_m;
70123+ spinlock_t *ptl_m;
70124+ struct vm_area_struct *vma_m;
70125+ pmd_t *pmd_m;
70126+ pte_t *pte_m, entry_m;
70127+
70128+ BUG_ON(!page_m || PageAnon(page_m));
70129+
70130+ vma_m = pax_find_mirror_vma(vma);
70131+ if (!vma_m)
70132+ return;
70133+
70134+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70135+ address_m = address + SEGMEXEC_TASK_SIZE;
70136+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70137+ pte_m = pte_offset_map(pmd_m, address_m);
70138+ ptl_m = pte_lockptr(mm, pmd_m);
70139+ if (ptl != ptl_m) {
70140+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70141+ if (!pte_none(*pte_m))
70142+ goto out;
70143+ }
70144+
70145+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70146+ page_cache_get(page_m);
70147+ page_add_file_rmap(page_m);
70148+ inc_mm_counter_fast(mm, MM_FILEPAGES);
70149+ set_pte_at(mm, address_m, pte_m, entry_m);
70150+ update_mmu_cache(vma_m, address_m, entry_m);
70151+out:
70152+ if (ptl != ptl_m)
70153+ spin_unlock(ptl_m);
70154+ pte_unmap(pte_m);
70155+}
70156+
70157+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
70158+{
70159+ struct mm_struct *mm = vma->vm_mm;
70160+ unsigned long address_m;
70161+ spinlock_t *ptl_m;
70162+ struct vm_area_struct *vma_m;
70163+ pmd_t *pmd_m;
70164+ pte_t *pte_m, entry_m;
70165+
70166+ vma_m = pax_find_mirror_vma(vma);
70167+ if (!vma_m)
70168+ return;
70169+
70170+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70171+ address_m = address + SEGMEXEC_TASK_SIZE;
70172+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70173+ pte_m = pte_offset_map(pmd_m, address_m);
70174+ ptl_m = pte_lockptr(mm, pmd_m);
70175+ if (ptl != ptl_m) {
70176+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70177+ if (!pte_none(*pte_m))
70178+ goto out;
70179+ }
70180+
70181+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
70182+ set_pte_at(mm, address_m, pte_m, entry_m);
70183+out:
70184+ if (ptl != ptl_m)
70185+ spin_unlock(ptl_m);
70186+ pte_unmap(pte_m);
70187+}
70188+
70189+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
70190+{
70191+ struct page *page_m;
70192+ pte_t entry;
70193+
70194+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
70195+ goto out;
70196+
70197+ entry = *pte;
70198+ page_m = vm_normal_page(vma, address, entry);
70199+ if (!page_m)
70200+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
70201+ else if (PageAnon(page_m)) {
70202+ if (pax_find_mirror_vma(vma)) {
70203+ pte_unmap_unlock(pte, ptl);
70204+ lock_page(page_m);
70205+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
70206+ if (pte_same(entry, *pte))
70207+ pax_mirror_anon_pte(vma, address, page_m, ptl);
70208+ else
70209+ unlock_page(page_m);
70210+ }
70211+ } else
70212+ pax_mirror_file_pte(vma, address, page_m, ptl);
70213+
70214+out:
70215+ pte_unmap_unlock(pte, ptl);
70216+}
70217+#endif
70218+
70219 /*
70220 * This routine handles present pages, when users try to write
70221 * to a shared page. It is done by copying the page to a new address
70222@@ -2687,6 +2884,12 @@ gotten:
70223 */
70224 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70225 if (likely(pte_same(*page_table, orig_pte))) {
70226+
70227+#ifdef CONFIG_PAX_SEGMEXEC
70228+ if (pax_find_mirror_vma(vma))
70229+ BUG_ON(!trylock_page(new_page));
70230+#endif
70231+
70232 if (old_page) {
70233 if (!PageAnon(old_page)) {
70234 dec_mm_counter_fast(mm, MM_FILEPAGES);
70235@@ -2738,6 +2941,10 @@ gotten:
70236 page_remove_rmap(old_page);
70237 }
70238
70239+#ifdef CONFIG_PAX_SEGMEXEC
70240+ pax_mirror_anon_pte(vma, address, new_page, ptl);
70241+#endif
70242+
70243 /* Free the old page.. */
70244 new_page = old_page;
70245 ret |= VM_FAULT_WRITE;
70246@@ -3017,6 +3224,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70247 swap_free(entry);
70248 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
70249 try_to_free_swap(page);
70250+
70251+#ifdef CONFIG_PAX_SEGMEXEC
70252+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
70253+#endif
70254+
70255 unlock_page(page);
70256 if (swapcache) {
70257 /*
70258@@ -3040,6 +3252,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70259
70260 /* No need to invalidate - it was non-present before */
70261 update_mmu_cache(vma, address, page_table);
70262+
70263+#ifdef CONFIG_PAX_SEGMEXEC
70264+ pax_mirror_anon_pte(vma, address, page, ptl);
70265+#endif
70266+
70267 unlock:
70268 pte_unmap_unlock(page_table, ptl);
70269 out:
70270@@ -3059,40 +3276,6 @@ out_release:
70271 }
70272
70273 /*
70274- * This is like a special single-page "expand_{down|up}wards()",
70275- * except we must first make sure that 'address{-|+}PAGE_SIZE'
70276- * doesn't hit another vma.
70277- */
70278-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
70279-{
70280- address &= PAGE_MASK;
70281- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
70282- struct vm_area_struct *prev = vma->vm_prev;
70283-
70284- /*
70285- * Is there a mapping abutting this one below?
70286- *
70287- * That's only ok if it's the same stack mapping
70288- * that has gotten split..
70289- */
70290- if (prev && prev->vm_end == address)
70291- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
70292-
70293- expand_downwards(vma, address - PAGE_SIZE);
70294- }
70295- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
70296- struct vm_area_struct *next = vma->vm_next;
70297-
70298- /* As VM_GROWSDOWN but s/below/above/ */
70299- if (next && next->vm_start == address + PAGE_SIZE)
70300- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
70301-
70302- expand_upwards(vma, address + PAGE_SIZE);
70303- }
70304- return 0;
70305-}
70306-
70307-/*
70308 * We enter with non-exclusive mmap_sem (to exclude vma changes,
70309 * but allow concurrent faults), and pte mapped but not yet locked.
70310 * We return with mmap_sem still held, but pte unmapped and unlocked.
70311@@ -3101,27 +3284,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70312 unsigned long address, pte_t *page_table, pmd_t *pmd,
70313 unsigned int flags)
70314 {
70315- struct page *page;
70316+ struct page *page = NULL;
70317 spinlock_t *ptl;
70318 pte_t entry;
70319
70320- pte_unmap(page_table);
70321-
70322- /* Check if we need to add a guard page to the stack */
70323- if (check_stack_guard_page(vma, address) < 0)
70324- return VM_FAULT_SIGBUS;
70325-
70326- /* Use the zero-page for reads */
70327 if (!(flags & FAULT_FLAG_WRITE)) {
70328 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
70329 vma->vm_page_prot));
70330- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70331+ ptl = pte_lockptr(mm, pmd);
70332+ spin_lock(ptl);
70333 if (!pte_none(*page_table))
70334 goto unlock;
70335 goto setpte;
70336 }
70337
70338 /* Allocate our own private page. */
70339+ pte_unmap(page_table);
70340+
70341 if (unlikely(anon_vma_prepare(vma)))
70342 goto oom;
70343 page = alloc_zeroed_user_highpage_movable(vma, address);
70344@@ -3140,6 +3319,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70345 if (!pte_none(*page_table))
70346 goto release;
70347
70348+#ifdef CONFIG_PAX_SEGMEXEC
70349+ if (pax_find_mirror_vma(vma))
70350+ BUG_ON(!trylock_page(page));
70351+#endif
70352+
70353 inc_mm_counter_fast(mm, MM_ANONPAGES);
70354 page_add_new_anon_rmap(page, vma, address);
70355 setpte:
70356@@ -3147,6 +3331,12 @@ setpte:
70357
70358 /* No need to invalidate - it was non-present before */
70359 update_mmu_cache(vma, address, page_table);
70360+
70361+#ifdef CONFIG_PAX_SEGMEXEC
70362+ if (page)
70363+ pax_mirror_anon_pte(vma, address, page, ptl);
70364+#endif
70365+
70366 unlock:
70367 pte_unmap_unlock(page_table, ptl);
70368 return 0;
70369@@ -3290,6 +3480,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70370 */
70371 /* Only go through if we didn't race with anybody else... */
70372 if (likely(pte_same(*page_table, orig_pte))) {
70373+
70374+#ifdef CONFIG_PAX_SEGMEXEC
70375+ if (anon && pax_find_mirror_vma(vma))
70376+ BUG_ON(!trylock_page(page));
70377+#endif
70378+
70379 flush_icache_page(vma, page);
70380 entry = mk_pte(page, vma->vm_page_prot);
70381 if (flags & FAULT_FLAG_WRITE)
70382@@ -3309,6 +3505,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70383
70384 /* no need to invalidate: a not-present page won't be cached */
70385 update_mmu_cache(vma, address, page_table);
70386+
70387+#ifdef CONFIG_PAX_SEGMEXEC
70388+ if (anon)
70389+ pax_mirror_anon_pte(vma, address, page, ptl);
70390+ else
70391+ pax_mirror_file_pte(vma, address, page, ptl);
70392+#endif
70393+
70394 } else {
70395 if (cow_page)
70396 mem_cgroup_uncharge_page(cow_page);
70397@@ -3462,6 +3666,12 @@ int handle_pte_fault(struct mm_struct *mm,
70398 if (flags & FAULT_FLAG_WRITE)
70399 flush_tlb_fix_spurious_fault(vma, address);
70400 }
70401+
70402+#ifdef CONFIG_PAX_SEGMEXEC
70403+ pax_mirror_pte(vma, address, pte, pmd, ptl);
70404+ return 0;
70405+#endif
70406+
70407 unlock:
70408 pte_unmap_unlock(pte, ptl);
70409 return 0;
70410@@ -3478,6 +3688,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70411 pmd_t *pmd;
70412 pte_t *pte;
70413
70414+#ifdef CONFIG_PAX_SEGMEXEC
70415+ struct vm_area_struct *vma_m;
70416+#endif
70417+
70418 __set_current_state(TASK_RUNNING);
70419
70420 count_vm_event(PGFAULT);
70421@@ -3489,6 +3703,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70422 if (unlikely(is_vm_hugetlb_page(vma)))
70423 return hugetlb_fault(mm, vma, address, flags);
70424
70425+#ifdef CONFIG_PAX_SEGMEXEC
70426+ vma_m = pax_find_mirror_vma(vma);
70427+ if (vma_m) {
70428+ unsigned long address_m;
70429+ pgd_t *pgd_m;
70430+ pud_t *pud_m;
70431+ pmd_t *pmd_m;
70432+
70433+ if (vma->vm_start > vma_m->vm_start) {
70434+ address_m = address;
70435+ address -= SEGMEXEC_TASK_SIZE;
70436+ vma = vma_m;
70437+ } else
70438+ address_m = address + SEGMEXEC_TASK_SIZE;
70439+
70440+ pgd_m = pgd_offset(mm, address_m);
70441+ pud_m = pud_alloc(mm, pgd_m, address_m);
70442+ if (!pud_m)
70443+ return VM_FAULT_OOM;
70444+ pmd_m = pmd_alloc(mm, pud_m, address_m);
70445+ if (!pmd_m)
70446+ return VM_FAULT_OOM;
70447+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
70448+ return VM_FAULT_OOM;
70449+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
70450+ }
70451+#endif
70452+
70453 pgd = pgd_offset(mm, address);
70454 pud = pud_alloc(mm, pgd, address);
70455 if (!pud)
70456@@ -3518,7 +3760,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70457 * run pte_offset_map on the pmd, if an huge pmd could
70458 * materialize from under us from a different thread.
70459 */
70460- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
70461+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
70462 return VM_FAULT_OOM;
70463 /* if an huge pmd materialized from under us just retry later */
70464 if (unlikely(pmd_trans_huge(*pmd)))
70465@@ -3555,6 +3797,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
70466 spin_unlock(&mm->page_table_lock);
70467 return 0;
70468 }
70469+
70470+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
70471+{
70472+ pud_t *new = pud_alloc_one(mm, address);
70473+ if (!new)
70474+ return -ENOMEM;
70475+
70476+ smp_wmb(); /* See comment in __pte_alloc */
70477+
70478+ spin_lock(&mm->page_table_lock);
70479+ if (pgd_present(*pgd)) /* Another has populated it */
70480+ pud_free(mm, new);
70481+ else
70482+ pgd_populate_kernel(mm, pgd, new);
70483+ spin_unlock(&mm->page_table_lock);
70484+ return 0;
70485+}
70486 #endif /* __PAGETABLE_PUD_FOLDED */
70487
70488 #ifndef __PAGETABLE_PMD_FOLDED
70489@@ -3585,6 +3844,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
70490 spin_unlock(&mm->page_table_lock);
70491 return 0;
70492 }
70493+
70494+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
70495+{
70496+ pmd_t *new = pmd_alloc_one(mm, address);
70497+ if (!new)
70498+ return -ENOMEM;
70499+
70500+ smp_wmb(); /* See comment in __pte_alloc */
70501+
70502+ spin_lock(&mm->page_table_lock);
70503+#ifndef __ARCH_HAS_4LEVEL_HACK
70504+ if (pud_present(*pud)) /* Another has populated it */
70505+ pmd_free(mm, new);
70506+ else
70507+ pud_populate_kernel(mm, pud, new);
70508+#else
70509+ if (pgd_present(*pud)) /* Another has populated it */
70510+ pmd_free(mm, new);
70511+ else
70512+ pgd_populate_kernel(mm, pud, new);
70513+#endif /* __ARCH_HAS_4LEVEL_HACK */
70514+ spin_unlock(&mm->page_table_lock);
70515+ return 0;
70516+}
70517 #endif /* __PAGETABLE_PMD_FOLDED */
70518
70519 int make_pages_present(unsigned long addr, unsigned long end)
70520@@ -3622,7 +3905,7 @@ static int __init gate_vma_init(void)
70521 gate_vma.vm_start = FIXADDR_USER_START;
70522 gate_vma.vm_end = FIXADDR_USER_END;
70523 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
70524- gate_vma.vm_page_prot = __P101;
70525+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
70526
70527 return 0;
70528 }
70529diff --git a/mm/mempolicy.c b/mm/mempolicy.c
70530index bf5b485..e44c2cb 100644
70531--- a/mm/mempolicy.c
70532+++ b/mm/mempolicy.c
70533@@ -619,6 +619,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
70534 unsigned long vmstart;
70535 unsigned long vmend;
70536
70537+#ifdef CONFIG_PAX_SEGMEXEC
70538+ struct vm_area_struct *vma_m;
70539+#endif
70540+
70541 vma = find_vma(mm, start);
70542 if (!vma || vma->vm_start > start)
70543 return -EFAULT;
70544@@ -672,6 +676,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
70545 if (err)
70546 goto out;
70547 }
70548+
70549+#ifdef CONFIG_PAX_SEGMEXEC
70550+ vma_m = pax_find_mirror_vma(vma);
70551+ if (vma_m && vma_m->vm_ops && vma_m->vm_ops->set_policy) {
70552+ err = vma_m->vm_ops->set_policy(vma_m, new_pol);
70553+ if (err)
70554+ goto out;
70555+ }
70556+#endif
70557+
70558 }
70559
70560 out:
70561@@ -1105,6 +1119,17 @@ static long do_mbind(unsigned long start, unsigned long len,
70562
70563 if (end < start)
70564 return -EINVAL;
70565+
70566+#ifdef CONFIG_PAX_SEGMEXEC
70567+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
70568+ if (end > SEGMEXEC_TASK_SIZE)
70569+ return -EINVAL;
70570+ } else
70571+#endif
70572+
70573+ if (end > TASK_SIZE)
70574+ return -EINVAL;
70575+
70576 if (end == start)
70577 return 0;
70578
70579@@ -1328,8 +1353,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70580 */
70581 tcred = __task_cred(task);
70582 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
70583- cred->uid != tcred->suid && cred->uid != tcred->uid &&
70584- !capable(CAP_SYS_NICE)) {
70585+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
70586 rcu_read_unlock();
70587 err = -EPERM;
70588 goto out_put;
70589@@ -1360,6 +1384,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70590 goto out;
70591 }
70592
70593+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70594+ if (mm != current->mm &&
70595+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
70596+ mmput(mm);
70597+ err = -EPERM;
70598+ goto out;
70599+ }
70600+#endif
70601+
70602 err = do_migrate_pages(mm, old, new,
70603 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
70604
70605diff --git a/mm/mlock.c b/mm/mlock.c
70606index ef726e8..13e0901 100644
70607--- a/mm/mlock.c
70608+++ b/mm/mlock.c
70609@@ -13,6 +13,7 @@
70610 #include <linux/pagemap.h>
70611 #include <linux/mempolicy.h>
70612 #include <linux/syscalls.h>
70613+#include <linux/security.h>
70614 #include <linux/sched.h>
70615 #include <linux/export.h>
70616 #include <linux/rmap.h>
70617@@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
70618 return -EINVAL;
70619 if (end == start)
70620 return 0;
70621+ if (end > TASK_SIZE)
70622+ return -EINVAL;
70623+
70624 vma = find_vma(current->mm, start);
70625 if (!vma || vma->vm_start > start)
70626 return -ENOMEM;
70627@@ -396,6 +400,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
70628 for (nstart = start ; ; ) {
70629 vm_flags_t newflags;
70630
70631+#ifdef CONFIG_PAX_SEGMEXEC
70632+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70633+ break;
70634+#endif
70635+
70636 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
70637
70638 newflags = vma->vm_flags | VM_LOCKED;
70639@@ -501,6 +510,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
70640 lock_limit >>= PAGE_SHIFT;
70641
70642 /* check against resource limits */
70643+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
70644 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
70645 error = do_mlock(start, len, 1);
70646 up_write(&current->mm->mmap_sem);
70647@@ -524,17 +534,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
70648 static int do_mlockall(int flags)
70649 {
70650 struct vm_area_struct * vma, * prev = NULL;
70651- unsigned int def_flags = 0;
70652
70653 if (flags & MCL_FUTURE)
70654- def_flags = VM_LOCKED;
70655- current->mm->def_flags = def_flags;
70656+ current->mm->def_flags |= VM_LOCKED;
70657+ else
70658+ current->mm->def_flags &= ~VM_LOCKED;
70659 if (flags == MCL_FUTURE)
70660 goto out;
70661
70662 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
70663 vm_flags_t newflags;
70664
70665+#ifdef CONFIG_PAX_SEGMEXEC
70666+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70667+ break;
70668+#endif
70669+
70670+ BUG_ON(vma->vm_end > TASK_SIZE);
70671 newflags = vma->vm_flags | VM_LOCKED;
70672 if (!(flags & MCL_CURRENT))
70673 newflags &= ~VM_LOCKED;
70674@@ -567,6 +583,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
70675 lock_limit >>= PAGE_SHIFT;
70676
70677 ret = -ENOMEM;
70678+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
70679 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
70680 capable(CAP_IPC_LOCK))
70681 ret = do_mlockall(flags);
70682diff --git a/mm/mmap.c b/mm/mmap.c
70683index 848ef52..d2b586c 100644
70684--- a/mm/mmap.c
70685+++ b/mm/mmap.c
70686@@ -46,6 +46,16 @@
70687 #define arch_rebalance_pgtables(addr, len) (addr)
70688 #endif
70689
70690+static inline void verify_mm_writelocked(struct mm_struct *mm)
70691+{
70692+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
70693+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
70694+ up_read(&mm->mmap_sem);
70695+ BUG();
70696+ }
70697+#endif
70698+}
70699+
70700 static void unmap_region(struct mm_struct *mm,
70701 struct vm_area_struct *vma, struct vm_area_struct *prev,
70702 unsigned long start, unsigned long end);
70703@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
70704 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
70705 *
70706 */
70707-pgprot_t protection_map[16] = {
70708+pgprot_t protection_map[16] __read_only = {
70709 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
70710 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
70711 };
70712
70713-pgprot_t vm_get_page_prot(unsigned long vm_flags)
70714+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
70715 {
70716- return __pgprot(pgprot_val(protection_map[vm_flags &
70717+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
70718 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
70719 pgprot_val(arch_vm_get_page_prot(vm_flags)));
70720+
70721+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70722+ if (!(__supported_pte_mask & _PAGE_NX) &&
70723+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
70724+ (vm_flags & (VM_READ | VM_WRITE)))
70725+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
70726+#endif
70727+
70728+ return prot;
70729 }
70730 EXPORT_SYMBOL(vm_get_page_prot);
70731
70732 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
70733 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
70734 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
70735+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
70736 /*
70737 * Make sure vm_committed_as in one cacheline and not cacheline shared with
70738 * other variables. It can be updated by several CPUs frequently.
70739@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
70740 struct vm_area_struct *next = vma->vm_next;
70741
70742 might_sleep();
70743+ BUG_ON(vma->vm_mirror);
70744 if (vma->vm_ops && vma->vm_ops->close)
70745 vma->vm_ops->close(vma);
70746 if (vma->vm_file) {
70747@@ -274,6 +295,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
70748 * not page aligned -Ram Gupta
70749 */
70750 rlim = rlimit(RLIMIT_DATA);
70751+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
70752 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
70753 (mm->end_data - mm->start_data) > rlim)
70754 goto out;
70755@@ -690,6 +712,12 @@ static int
70756 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
70757 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
70758 {
70759+
70760+#ifdef CONFIG_PAX_SEGMEXEC
70761+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
70762+ return 0;
70763+#endif
70764+
70765 if (is_mergeable_vma(vma, file, vm_flags) &&
70766 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
70767 if (vma->vm_pgoff == vm_pgoff)
70768@@ -709,6 +737,12 @@ static int
70769 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
70770 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
70771 {
70772+
70773+#ifdef CONFIG_PAX_SEGMEXEC
70774+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
70775+ return 0;
70776+#endif
70777+
70778 if (is_mergeable_vma(vma, file, vm_flags) &&
70779 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
70780 pgoff_t vm_pglen;
70781@@ -751,13 +785,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
70782 struct vm_area_struct *vma_merge(struct mm_struct *mm,
70783 struct vm_area_struct *prev, unsigned long addr,
70784 unsigned long end, unsigned long vm_flags,
70785- struct anon_vma *anon_vma, struct file *file,
70786+ struct anon_vma *anon_vma, struct file *file,
70787 pgoff_t pgoff, struct mempolicy *policy)
70788 {
70789 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
70790 struct vm_area_struct *area, *next;
70791 int err;
70792
70793+#ifdef CONFIG_PAX_SEGMEXEC
70794+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
70795+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
70796+
70797+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
70798+#endif
70799+
70800 /*
70801 * We later require that vma->vm_flags == vm_flags,
70802 * so this tests vma->vm_flags & VM_SPECIAL, too.
70803@@ -773,6 +814,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70804 if (next && next->vm_end == end) /* cases 6, 7, 8 */
70805 next = next->vm_next;
70806
70807+#ifdef CONFIG_PAX_SEGMEXEC
70808+ if (prev)
70809+ prev_m = pax_find_mirror_vma(prev);
70810+ if (area)
70811+ area_m = pax_find_mirror_vma(area);
70812+ if (next)
70813+ next_m = pax_find_mirror_vma(next);
70814+#endif
70815+
70816 /*
70817 * Can it merge with the predecessor?
70818 */
70819@@ -792,9 +842,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70820 /* cases 1, 6 */
70821 err = vma_adjust(prev, prev->vm_start,
70822 next->vm_end, prev->vm_pgoff, NULL);
70823- } else /* cases 2, 5, 7 */
70824+
70825+#ifdef CONFIG_PAX_SEGMEXEC
70826+ if (!err && prev_m)
70827+ err = vma_adjust(prev_m, prev_m->vm_start,
70828+ next_m->vm_end, prev_m->vm_pgoff, NULL);
70829+#endif
70830+
70831+ } else { /* cases 2, 5, 7 */
70832 err = vma_adjust(prev, prev->vm_start,
70833 end, prev->vm_pgoff, NULL);
70834+
70835+#ifdef CONFIG_PAX_SEGMEXEC
70836+ if (!err && prev_m)
70837+ err = vma_adjust(prev_m, prev_m->vm_start,
70838+ end_m, prev_m->vm_pgoff, NULL);
70839+#endif
70840+
70841+ }
70842 if (err)
70843 return NULL;
70844 khugepaged_enter_vma_merge(prev);
70845@@ -808,12 +873,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70846 mpol_equal(policy, vma_policy(next)) &&
70847 can_vma_merge_before(next, vm_flags,
70848 anon_vma, file, pgoff+pglen)) {
70849- if (prev && addr < prev->vm_end) /* case 4 */
70850+ if (prev && addr < prev->vm_end) { /* case 4 */
70851 err = vma_adjust(prev, prev->vm_start,
70852 addr, prev->vm_pgoff, NULL);
70853- else /* cases 3, 8 */
70854+
70855+#ifdef CONFIG_PAX_SEGMEXEC
70856+ if (!err && prev_m)
70857+ err = vma_adjust(prev_m, prev_m->vm_start,
70858+ addr_m, prev_m->vm_pgoff, NULL);
70859+#endif
70860+
70861+ } else { /* cases 3, 8 */
70862 err = vma_adjust(area, addr, next->vm_end,
70863 next->vm_pgoff - pglen, NULL);
70864+
70865+#ifdef CONFIG_PAX_SEGMEXEC
70866+ if (!err && area_m)
70867+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
70868+ next_m->vm_pgoff - pglen, NULL);
70869+#endif
70870+
70871+ }
70872 if (err)
70873 return NULL;
70874 khugepaged_enter_vma_merge(area);
70875@@ -922,14 +1002,11 @@ none:
70876 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
70877 struct file *file, long pages)
70878 {
70879- const unsigned long stack_flags
70880- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
70881-
70882 if (file) {
70883 mm->shared_vm += pages;
70884 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
70885 mm->exec_vm += pages;
70886- } else if (flags & stack_flags)
70887+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
70888 mm->stack_vm += pages;
70889 if (flags & (VM_RESERVED|VM_IO))
70890 mm->reserved_vm += pages;
70891@@ -969,7 +1046,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70892 * (the exception is when the underlying filesystem is noexec
70893 * mounted, in which case we dont add PROT_EXEC.)
70894 */
70895- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70896+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70897 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
70898 prot |= PROT_EXEC;
70899
70900@@ -995,7 +1072,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70901 /* Obtain the address to map to. we verify (or select) it and ensure
70902 * that it represents a valid section of the address space.
70903 */
70904- addr = get_unmapped_area(file, addr, len, pgoff, flags);
70905+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
70906 if (addr & ~PAGE_MASK)
70907 return addr;
70908
70909@@ -1006,6 +1083,36 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70910 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
70911 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
70912
70913+#ifdef CONFIG_PAX_MPROTECT
70914+ if (mm->pax_flags & MF_PAX_MPROTECT) {
70915+#ifndef CONFIG_PAX_MPROTECT_COMPAT
70916+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
70917+ gr_log_rwxmmap(file);
70918+
70919+#ifdef CONFIG_PAX_EMUPLT
70920+ vm_flags &= ~VM_EXEC;
70921+#else
70922+ return -EPERM;
70923+#endif
70924+
70925+ }
70926+
70927+ if (!(vm_flags & VM_EXEC))
70928+ vm_flags &= ~VM_MAYEXEC;
70929+#else
70930+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
70931+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
70932+#endif
70933+ else
70934+ vm_flags &= ~VM_MAYWRITE;
70935+ }
70936+#endif
70937+
70938+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70939+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
70940+ vm_flags &= ~VM_PAGEEXEC;
70941+#endif
70942+
70943 if (flags & MAP_LOCKED)
70944 if (!can_do_mlock())
70945 return -EPERM;
70946@@ -1017,6 +1124,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70947 locked += mm->locked_vm;
70948 lock_limit = rlimit(RLIMIT_MEMLOCK);
70949 lock_limit >>= PAGE_SHIFT;
70950+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
70951 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
70952 return -EAGAIN;
70953 }
70954@@ -1087,6 +1195,9 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70955 if (error)
70956 return error;
70957
70958+ if (!gr_acl_handle_mmap(file, prot))
70959+ return -EACCES;
70960+
70961 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
70962 }
70963
70964@@ -1192,7 +1303,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
70965 vm_flags_t vm_flags = vma->vm_flags;
70966
70967 /* If it was private or non-writable, the write bit is already clear */
70968- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
70969+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
70970 return 0;
70971
70972 /* The backer wishes to know when pages are first written to? */
70973@@ -1241,14 +1352,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
70974 unsigned long charged = 0;
70975 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
70976
70977+#ifdef CONFIG_PAX_SEGMEXEC
70978+ struct vm_area_struct *vma_m = NULL;
70979+#endif
70980+
70981+ /*
70982+ * mm->mmap_sem is required to protect against another thread
70983+ * changing the mappings in case we sleep.
70984+ */
70985+ verify_mm_writelocked(mm);
70986+
70987 /* Clear old maps */
70988 error = -ENOMEM;
70989-munmap_back:
70990 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70991 if (vma && vma->vm_start < addr + len) {
70992 if (do_munmap(mm, addr, len))
70993 return -ENOMEM;
70994- goto munmap_back;
70995+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70996+ BUG_ON(vma && vma->vm_start < addr + len);
70997 }
70998
70999 /* Check against address space limit. */
71000@@ -1297,6 +1418,16 @@ munmap_back:
71001 goto unacct_error;
71002 }
71003
71004+#ifdef CONFIG_PAX_SEGMEXEC
71005+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
71006+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71007+ if (!vma_m) {
71008+ error = -ENOMEM;
71009+ goto free_vma;
71010+ }
71011+ }
71012+#endif
71013+
71014 vma->vm_mm = mm;
71015 vma->vm_start = addr;
71016 vma->vm_end = addr + len;
71017@@ -1321,6 +1452,19 @@ munmap_back:
71018 error = file->f_op->mmap(file, vma);
71019 if (error)
71020 goto unmap_and_free_vma;
71021+
71022+#ifdef CONFIG_PAX_SEGMEXEC
71023+ if (vma_m && (vm_flags & VM_EXECUTABLE))
71024+ added_exe_file_vma(mm);
71025+#endif
71026+
71027+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71028+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
71029+ vma->vm_flags |= VM_PAGEEXEC;
71030+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
71031+ }
71032+#endif
71033+
71034 if (vm_flags & VM_EXECUTABLE)
71035 added_exe_file_vma(mm);
71036
71037@@ -1358,6 +1502,11 @@ munmap_back:
71038 vma_link(mm, vma, prev, rb_link, rb_parent);
71039 file = vma->vm_file;
71040
71041+#ifdef CONFIG_PAX_SEGMEXEC
71042+ if (vma_m)
71043+ BUG_ON(pax_mirror_vma(vma_m, vma));
71044+#endif
71045+
71046 /* Once vma denies write, undo our temporary denial count */
71047 if (correct_wcount)
71048 atomic_inc(&inode->i_writecount);
71049@@ -1366,6 +1515,7 @@ out:
71050
71051 mm->total_vm += len >> PAGE_SHIFT;
71052 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
71053+ track_exec_limit(mm, addr, addr + len, vm_flags);
71054 if (vm_flags & VM_LOCKED) {
71055 if (!mlock_vma_pages_range(vma, addr, addr + len))
71056 mm->locked_vm += (len >> PAGE_SHIFT);
71057@@ -1383,6 +1533,12 @@ unmap_and_free_vma:
71058 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
71059 charged = 0;
71060 free_vma:
71061+
71062+#ifdef CONFIG_PAX_SEGMEXEC
71063+ if (vma_m)
71064+ kmem_cache_free(vm_area_cachep, vma_m);
71065+#endif
71066+
71067 kmem_cache_free(vm_area_cachep, vma);
71068 unacct_error:
71069 if (charged)
71070@@ -1390,6 +1546,44 @@ unacct_error:
71071 return error;
71072 }
71073
71074+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
71075+{
71076+ if (!vma) {
71077+#ifdef CONFIG_STACK_GROWSUP
71078+ if (addr > sysctl_heap_stack_gap)
71079+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
71080+ else
71081+ vma = find_vma(current->mm, 0);
71082+ if (vma && (vma->vm_flags & VM_GROWSUP))
71083+ return false;
71084+#endif
71085+ return true;
71086+ }
71087+
71088+ if (addr + len > vma->vm_start)
71089+ return false;
71090+
71091+ if (vma->vm_flags & VM_GROWSDOWN)
71092+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
71093+#ifdef CONFIG_STACK_GROWSUP
71094+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
71095+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
71096+#endif
71097+
71098+ return true;
71099+}
71100+
71101+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
71102+{
71103+ if (vma->vm_start < len)
71104+ return -ENOMEM;
71105+ if (!(vma->vm_flags & VM_GROWSDOWN))
71106+ return vma->vm_start - len;
71107+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
71108+ return vma->vm_start - len - sysctl_heap_stack_gap;
71109+ return -ENOMEM;
71110+}
71111+
71112 /* Get an address range which is currently unmapped.
71113 * For shmat() with addr=0.
71114 *
71115@@ -1416,18 +1610,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
71116 if (flags & MAP_FIXED)
71117 return addr;
71118
71119+#ifdef CONFIG_PAX_RANDMMAP
71120+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71121+#endif
71122+
71123 if (addr) {
71124 addr = PAGE_ALIGN(addr);
71125- vma = find_vma(mm, addr);
71126- if (TASK_SIZE - len >= addr &&
71127- (!vma || addr + len <= vma->vm_start))
71128- return addr;
71129+ if (TASK_SIZE - len >= addr) {
71130+ vma = find_vma(mm, addr);
71131+ if (check_heap_stack_gap(vma, addr, len))
71132+ return addr;
71133+ }
71134 }
71135 if (len > mm->cached_hole_size) {
71136- start_addr = addr = mm->free_area_cache;
71137+ start_addr = addr = mm->free_area_cache;
71138 } else {
71139- start_addr = addr = TASK_UNMAPPED_BASE;
71140- mm->cached_hole_size = 0;
71141+ start_addr = addr = mm->mmap_base;
71142+ mm->cached_hole_size = 0;
71143 }
71144
71145 full_search:
71146@@ -1438,34 +1637,40 @@ full_search:
71147 * Start a new search - just in case we missed
71148 * some holes.
71149 */
71150- if (start_addr != TASK_UNMAPPED_BASE) {
71151- addr = TASK_UNMAPPED_BASE;
71152- start_addr = addr;
71153+ if (start_addr != mm->mmap_base) {
71154+ start_addr = addr = mm->mmap_base;
71155 mm->cached_hole_size = 0;
71156 goto full_search;
71157 }
71158 return -ENOMEM;
71159 }
71160- if (!vma || addr + len <= vma->vm_start) {
71161- /*
71162- * Remember the place where we stopped the search:
71163- */
71164- mm->free_area_cache = addr + len;
71165- return addr;
71166- }
71167+ if (check_heap_stack_gap(vma, addr, len))
71168+ break;
71169 if (addr + mm->cached_hole_size < vma->vm_start)
71170 mm->cached_hole_size = vma->vm_start - addr;
71171 addr = vma->vm_end;
71172 }
71173+
71174+ /*
71175+ * Remember the place where we stopped the search:
71176+ */
71177+ mm->free_area_cache = addr + len;
71178+ return addr;
71179 }
71180 #endif
71181
71182 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
71183 {
71184+
71185+#ifdef CONFIG_PAX_SEGMEXEC
71186+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71187+ return;
71188+#endif
71189+
71190 /*
71191 * Is this a new hole at the lowest possible address?
71192 */
71193- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
71194+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
71195 mm->free_area_cache = addr;
71196 }
71197
71198@@ -1481,7 +1686,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71199 {
71200 struct vm_area_struct *vma;
71201 struct mm_struct *mm = current->mm;
71202- unsigned long addr = addr0, start_addr;
71203+ unsigned long base = mm->mmap_base, addr = addr0, start_addr;
71204
71205 /* requested length too big for entire address space */
71206 if (len > TASK_SIZE)
71207@@ -1490,13 +1695,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71208 if (flags & MAP_FIXED)
71209 return addr;
71210
71211+#ifdef CONFIG_PAX_RANDMMAP
71212+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71213+#endif
71214+
71215 /* requesting a specific address */
71216 if (addr) {
71217 addr = PAGE_ALIGN(addr);
71218- vma = find_vma(mm, addr);
71219- if (TASK_SIZE - len >= addr &&
71220- (!vma || addr + len <= vma->vm_start))
71221- return addr;
71222+ if (TASK_SIZE - len >= addr) {
71223+ vma = find_vma(mm, addr);
71224+ if (check_heap_stack_gap(vma, addr, len))
71225+ return addr;
71226+ }
71227 }
71228
71229 /* check if free_area_cache is useful for us */
71230@@ -1520,7 +1730,7 @@ try_again:
71231 * return with success:
71232 */
71233 vma = find_vma(mm, addr);
71234- if (!vma || addr+len <= vma->vm_start)
71235+ if (check_heap_stack_gap(vma, addr, len))
71236 /* remember the address as a hint for next time */
71237 return (mm->free_area_cache = addr);
71238
71239@@ -1529,8 +1739,8 @@ try_again:
71240 mm->cached_hole_size = vma->vm_start - addr;
71241
71242 /* try just below the current vma->vm_start */
71243- addr = vma->vm_start-len;
71244- } while (len < vma->vm_start);
71245+ addr = skip_heap_stack_gap(vma, len);
71246+ } while (!IS_ERR_VALUE(addr));
71247
71248 fail:
71249 /*
71250@@ -1553,13 +1763,21 @@ fail:
71251 * can happen with large stack limits and large mmap()
71252 * allocations.
71253 */
71254+ mm->mmap_base = TASK_UNMAPPED_BASE;
71255+
71256+#ifdef CONFIG_PAX_RANDMMAP
71257+ if (mm->pax_flags & MF_PAX_RANDMMAP)
71258+ mm->mmap_base += mm->delta_mmap;
71259+#endif
71260+
71261+ mm->free_area_cache = mm->mmap_base;
71262 mm->cached_hole_size = ~0UL;
71263- mm->free_area_cache = TASK_UNMAPPED_BASE;
71264 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
71265 /*
71266 * Restore the topdown base:
71267 */
71268- mm->free_area_cache = mm->mmap_base;
71269+ mm->mmap_base = base;
71270+ mm->free_area_cache = base;
71271 mm->cached_hole_size = ~0UL;
71272
71273 return addr;
71274@@ -1568,6 +1786,12 @@ fail:
71275
71276 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71277 {
71278+
71279+#ifdef CONFIG_PAX_SEGMEXEC
71280+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71281+ return;
71282+#endif
71283+
71284 /*
71285 * Is this a new hole at the highest possible address?
71286 */
71287@@ -1575,8 +1799,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71288 mm->free_area_cache = addr;
71289
71290 /* dont allow allocations above current base */
71291- if (mm->free_area_cache > mm->mmap_base)
71292+ if (mm->free_area_cache > mm->mmap_base) {
71293 mm->free_area_cache = mm->mmap_base;
71294+ mm->cached_hole_size = ~0UL;
71295+ }
71296 }
71297
71298 unsigned long
71299@@ -1672,6 +1898,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
71300 return vma;
71301 }
71302
71303+#ifdef CONFIG_PAX_SEGMEXEC
71304+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
71305+{
71306+ struct vm_area_struct *vma_m;
71307+
71308+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
71309+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
71310+ BUG_ON(vma->vm_mirror);
71311+ return NULL;
71312+ }
71313+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
71314+ vma_m = vma->vm_mirror;
71315+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
71316+ BUG_ON(vma->vm_file != vma_m->vm_file);
71317+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
71318+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
71319+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
71320+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
71321+ return vma_m;
71322+}
71323+#endif
71324+
71325 /*
71326 * Verify that the stack growth is acceptable and
71327 * update accounting. This is shared with both the
71328@@ -1688,6 +1936,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71329 return -ENOMEM;
71330
71331 /* Stack limit test */
71332+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
71333 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
71334 return -ENOMEM;
71335
71336@@ -1698,6 +1947,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71337 locked = mm->locked_vm + grow;
71338 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
71339 limit >>= PAGE_SHIFT;
71340+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71341 if (locked > limit && !capable(CAP_IPC_LOCK))
71342 return -ENOMEM;
71343 }
71344@@ -1728,37 +1978,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71345 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
71346 * vma is the last one with address > vma->vm_end. Have to extend vma.
71347 */
71348+#ifndef CONFIG_IA64
71349+static
71350+#endif
71351 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71352 {
71353 int error;
71354+ bool locknext;
71355
71356 if (!(vma->vm_flags & VM_GROWSUP))
71357 return -EFAULT;
71358
71359+ /* Also guard against wrapping around to address 0. */
71360+ if (address < PAGE_ALIGN(address+1))
71361+ address = PAGE_ALIGN(address+1);
71362+ else
71363+ return -ENOMEM;
71364+
71365 /*
71366 * We must make sure the anon_vma is allocated
71367 * so that the anon_vma locking is not a noop.
71368 */
71369 if (unlikely(anon_vma_prepare(vma)))
71370 return -ENOMEM;
71371+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
71372+ if (locknext && anon_vma_prepare(vma->vm_next))
71373+ return -ENOMEM;
71374 vma_lock_anon_vma(vma);
71375+ if (locknext)
71376+ vma_lock_anon_vma(vma->vm_next);
71377
71378 /*
71379 * vma->vm_start/vm_end cannot change under us because the caller
71380 * is required to hold the mmap_sem in read mode. We need the
71381- * anon_vma lock to serialize against concurrent expand_stacks.
71382- * Also guard against wrapping around to address 0.
71383+ * anon_vma locks to serialize against concurrent expand_stacks
71384+ * and expand_upwards.
71385 */
71386- if (address < PAGE_ALIGN(address+4))
71387- address = PAGE_ALIGN(address+4);
71388- else {
71389- vma_unlock_anon_vma(vma);
71390- return -ENOMEM;
71391- }
71392 error = 0;
71393
71394 /* Somebody else might have raced and expanded it already */
71395- if (address > vma->vm_end) {
71396+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
71397+ error = -ENOMEM;
71398+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
71399 unsigned long size, grow;
71400
71401 size = address - vma->vm_start;
71402@@ -1773,6 +2034,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71403 }
71404 }
71405 }
71406+ if (locknext)
71407+ vma_unlock_anon_vma(vma->vm_next);
71408 vma_unlock_anon_vma(vma);
71409 khugepaged_enter_vma_merge(vma);
71410 return error;
71411@@ -1786,6 +2049,8 @@ int expand_downwards(struct vm_area_struct *vma,
71412 unsigned long address)
71413 {
71414 int error;
71415+ bool lockprev = false;
71416+ struct vm_area_struct *prev;
71417
71418 /*
71419 * We must make sure the anon_vma is allocated
71420@@ -1799,6 +2064,15 @@ int expand_downwards(struct vm_area_struct *vma,
71421 if (error)
71422 return error;
71423
71424+ prev = vma->vm_prev;
71425+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
71426+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
71427+#endif
71428+ if (lockprev && anon_vma_prepare(prev))
71429+ return -ENOMEM;
71430+ if (lockprev)
71431+ vma_lock_anon_vma(prev);
71432+
71433 vma_lock_anon_vma(vma);
71434
71435 /*
71436@@ -1808,9 +2082,17 @@ int expand_downwards(struct vm_area_struct *vma,
71437 */
71438
71439 /* Somebody else might have raced and expanded it already */
71440- if (address < vma->vm_start) {
71441+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
71442+ error = -ENOMEM;
71443+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
71444 unsigned long size, grow;
71445
71446+#ifdef CONFIG_PAX_SEGMEXEC
71447+ struct vm_area_struct *vma_m;
71448+
71449+ vma_m = pax_find_mirror_vma(vma);
71450+#endif
71451+
71452 size = vma->vm_end - address;
71453 grow = (vma->vm_start - address) >> PAGE_SHIFT;
71454
71455@@ -1820,11 +2102,22 @@ int expand_downwards(struct vm_area_struct *vma,
71456 if (!error) {
71457 vma->vm_start = address;
71458 vma->vm_pgoff -= grow;
71459+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
71460+
71461+#ifdef CONFIG_PAX_SEGMEXEC
71462+ if (vma_m) {
71463+ vma_m->vm_start -= grow << PAGE_SHIFT;
71464+ vma_m->vm_pgoff -= grow;
71465+ }
71466+#endif
71467+
71468 perf_event_mmap(vma);
71469 }
71470 }
71471 }
71472 vma_unlock_anon_vma(vma);
71473+ if (lockprev)
71474+ vma_unlock_anon_vma(prev);
71475 khugepaged_enter_vma_merge(vma);
71476 return error;
71477 }
71478@@ -1894,6 +2187,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
71479 do {
71480 long nrpages = vma_pages(vma);
71481
71482+#ifdef CONFIG_PAX_SEGMEXEC
71483+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
71484+ vma = remove_vma(vma);
71485+ continue;
71486+ }
71487+#endif
71488+
71489 mm->total_vm -= nrpages;
71490 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
71491 vma = remove_vma(vma);
71492@@ -1939,6 +2239,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
71493 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
71494 vma->vm_prev = NULL;
71495 do {
71496+
71497+#ifdef CONFIG_PAX_SEGMEXEC
71498+ if (vma->vm_mirror) {
71499+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
71500+ vma->vm_mirror->vm_mirror = NULL;
71501+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
71502+ vma->vm_mirror = NULL;
71503+ }
71504+#endif
71505+
71506 rb_erase(&vma->vm_rb, &mm->mm_rb);
71507 mm->map_count--;
71508 tail_vma = vma;
71509@@ -1967,14 +2277,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71510 struct vm_area_struct *new;
71511 int err = -ENOMEM;
71512
71513+#ifdef CONFIG_PAX_SEGMEXEC
71514+ struct vm_area_struct *vma_m, *new_m = NULL;
71515+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
71516+#endif
71517+
71518 if (is_vm_hugetlb_page(vma) && (addr &
71519 ~(huge_page_mask(hstate_vma(vma)))))
71520 return -EINVAL;
71521
71522+#ifdef CONFIG_PAX_SEGMEXEC
71523+ vma_m = pax_find_mirror_vma(vma);
71524+#endif
71525+
71526 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71527 if (!new)
71528 goto out_err;
71529
71530+#ifdef CONFIG_PAX_SEGMEXEC
71531+ if (vma_m) {
71532+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71533+ if (!new_m) {
71534+ kmem_cache_free(vm_area_cachep, new);
71535+ goto out_err;
71536+ }
71537+ }
71538+#endif
71539+
71540 /* most fields are the same, copy all, and then fixup */
71541 *new = *vma;
71542
71543@@ -1987,6 +2316,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71544 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
71545 }
71546
71547+#ifdef CONFIG_PAX_SEGMEXEC
71548+ if (vma_m) {
71549+ *new_m = *vma_m;
71550+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
71551+ new_m->vm_mirror = new;
71552+ new->vm_mirror = new_m;
71553+
71554+ if (new_below)
71555+ new_m->vm_end = addr_m;
71556+ else {
71557+ new_m->vm_start = addr_m;
71558+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
71559+ }
71560+ }
71561+#endif
71562+
71563 pol = mpol_dup(vma_policy(vma));
71564 if (IS_ERR(pol)) {
71565 err = PTR_ERR(pol);
71566@@ -2012,6 +2357,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71567 else
71568 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
71569
71570+#ifdef CONFIG_PAX_SEGMEXEC
71571+ if (!err && vma_m) {
71572+ if (anon_vma_clone(new_m, vma_m))
71573+ goto out_free_mpol;
71574+
71575+ mpol_get(pol);
71576+ vma_set_policy(new_m, pol);
71577+
71578+ if (new_m->vm_file) {
71579+ get_file(new_m->vm_file);
71580+ if (vma_m->vm_flags & VM_EXECUTABLE)
71581+ added_exe_file_vma(mm);
71582+ }
71583+
71584+ if (new_m->vm_ops && new_m->vm_ops->open)
71585+ new_m->vm_ops->open(new_m);
71586+
71587+ if (new_below)
71588+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
71589+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
71590+ else
71591+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
71592+
71593+ if (err) {
71594+ if (new_m->vm_ops && new_m->vm_ops->close)
71595+ new_m->vm_ops->close(new_m);
71596+ if (new_m->vm_file) {
71597+ if (vma_m->vm_flags & VM_EXECUTABLE)
71598+ removed_exe_file_vma(mm);
71599+ fput(new_m->vm_file);
71600+ }
71601+ mpol_put(pol);
71602+ }
71603+ }
71604+#endif
71605+
71606 /* Success. */
71607 if (!err)
71608 return 0;
71609@@ -2024,10 +2405,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71610 removed_exe_file_vma(mm);
71611 fput(new->vm_file);
71612 }
71613- unlink_anon_vmas(new);
71614 out_free_mpol:
71615 mpol_put(pol);
71616 out_free_vma:
71617+
71618+#ifdef CONFIG_PAX_SEGMEXEC
71619+ if (new_m) {
71620+ unlink_anon_vmas(new_m);
71621+ kmem_cache_free(vm_area_cachep, new_m);
71622+ }
71623+#endif
71624+
71625+ unlink_anon_vmas(new);
71626 kmem_cache_free(vm_area_cachep, new);
71627 out_err:
71628 return err;
71629@@ -2040,6 +2429,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71630 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71631 unsigned long addr, int new_below)
71632 {
71633+
71634+#ifdef CONFIG_PAX_SEGMEXEC
71635+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71636+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
71637+ if (mm->map_count >= sysctl_max_map_count-1)
71638+ return -ENOMEM;
71639+ } else
71640+#endif
71641+
71642 if (mm->map_count >= sysctl_max_map_count)
71643 return -ENOMEM;
71644
71645@@ -2051,11 +2449,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71646 * work. This now handles partial unmappings.
71647 * Jeremy Fitzhardinge <jeremy@goop.org>
71648 */
71649+#ifdef CONFIG_PAX_SEGMEXEC
71650 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71651 {
71652+ int ret = __do_munmap(mm, start, len);
71653+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
71654+ return ret;
71655+
71656+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
71657+}
71658+
71659+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71660+#else
71661+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71662+#endif
71663+{
71664 unsigned long end;
71665 struct vm_area_struct *vma, *prev, *last;
71666
71667+ /*
71668+ * mm->mmap_sem is required to protect against another thread
71669+ * changing the mappings in case we sleep.
71670+ */
71671+ verify_mm_writelocked(mm);
71672+
71673 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
71674 return -EINVAL;
71675
71676@@ -2130,6 +2547,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71677 /* Fix up all other VM information */
71678 remove_vma_list(mm, vma);
71679
71680+ track_exec_limit(mm, start, end, 0UL);
71681+
71682 return 0;
71683 }
71684 EXPORT_SYMBOL(do_munmap);
71685@@ -2139,6 +2558,13 @@ int vm_munmap(unsigned long start, size_t len)
71686 int ret;
71687 struct mm_struct *mm = current->mm;
71688
71689+
71690+#ifdef CONFIG_PAX_SEGMEXEC
71691+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
71692+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
71693+ return -EINVAL;
71694+#endif
71695+
71696 down_write(&mm->mmap_sem);
71697 ret = do_munmap(mm, start, len);
71698 up_write(&mm->mmap_sem);
71699@@ -2152,16 +2578,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
71700 return vm_munmap(addr, len);
71701 }
71702
71703-static inline void verify_mm_writelocked(struct mm_struct *mm)
71704-{
71705-#ifdef CONFIG_DEBUG_VM
71706- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
71707- WARN_ON(1);
71708- up_read(&mm->mmap_sem);
71709- }
71710-#endif
71711-}
71712-
71713 /*
71714 * this is really a simplified "do_mmap". it only handles
71715 * anonymous maps. eventually we may be able to do some
71716@@ -2175,6 +2591,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71717 struct rb_node ** rb_link, * rb_parent;
71718 pgoff_t pgoff = addr >> PAGE_SHIFT;
71719 int error;
71720+ unsigned long charged;
71721
71722 len = PAGE_ALIGN(len);
71723 if (!len)
71724@@ -2186,16 +2603,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71725
71726 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
71727
71728+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
71729+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
71730+ flags &= ~VM_EXEC;
71731+
71732+#ifdef CONFIG_PAX_MPROTECT
71733+ if (mm->pax_flags & MF_PAX_MPROTECT)
71734+ flags &= ~VM_MAYEXEC;
71735+#endif
71736+
71737+ }
71738+#endif
71739+
71740 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
71741 if (error & ~PAGE_MASK)
71742 return error;
71743
71744+ charged = len >> PAGE_SHIFT;
71745+
71746 /*
71747 * mlock MCL_FUTURE?
71748 */
71749 if (mm->def_flags & VM_LOCKED) {
71750 unsigned long locked, lock_limit;
71751- locked = len >> PAGE_SHIFT;
71752+ locked = charged;
71753 locked += mm->locked_vm;
71754 lock_limit = rlimit(RLIMIT_MEMLOCK);
71755 lock_limit >>= PAGE_SHIFT;
71756@@ -2212,22 +2643,22 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71757 /*
71758 * Clear old maps. this also does some error checking for us
71759 */
71760- munmap_back:
71761 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71762 if (vma && vma->vm_start < addr + len) {
71763 if (do_munmap(mm, addr, len))
71764 return -ENOMEM;
71765- goto munmap_back;
71766- }
71767+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71768+ BUG_ON(vma && vma->vm_start < addr + len);
71769+ }
71770
71771 /* Check against address space limits *after* clearing old maps... */
71772- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
71773+ if (!may_expand_vm(mm, charged))
71774 return -ENOMEM;
71775
71776 if (mm->map_count > sysctl_max_map_count)
71777 return -ENOMEM;
71778
71779- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
71780+ if (security_vm_enough_memory_mm(mm, charged))
71781 return -ENOMEM;
71782
71783 /* Can we just expand an old private anonymous mapping? */
71784@@ -2241,7 +2672,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71785 */
71786 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71787 if (!vma) {
71788- vm_unacct_memory(len >> PAGE_SHIFT);
71789+ vm_unacct_memory(charged);
71790 return -ENOMEM;
71791 }
71792
71793@@ -2255,11 +2686,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71794 vma_link(mm, vma, prev, rb_link, rb_parent);
71795 out:
71796 perf_event_mmap(vma);
71797- mm->total_vm += len >> PAGE_SHIFT;
71798+ mm->total_vm += charged;
71799 if (flags & VM_LOCKED) {
71800 if (!mlock_vma_pages_range(vma, addr, addr + len))
71801- mm->locked_vm += (len >> PAGE_SHIFT);
71802+ mm->locked_vm += charged;
71803 }
71804+ track_exec_limit(mm, addr, addr + len, flags);
71805 return addr;
71806 }
71807
71808@@ -2315,8 +2747,10 @@ void exit_mmap(struct mm_struct *mm)
71809 * Walk the list again, actually closing and freeing it,
71810 * with preemption enabled, without holding any MM locks.
71811 */
71812- while (vma)
71813+ while (vma) {
71814+ vma->vm_mirror = NULL;
71815 vma = remove_vma(vma);
71816+ }
71817
71818 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
71819 }
71820@@ -2330,6 +2764,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
71821 struct vm_area_struct * __vma, * prev;
71822 struct rb_node ** rb_link, * rb_parent;
71823
71824+#ifdef CONFIG_PAX_SEGMEXEC
71825+ struct vm_area_struct *vma_m = NULL;
71826+#endif
71827+
71828+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
71829+ return -EPERM;
71830+
71831 /*
71832 * The vm_pgoff of a purely anonymous vma should be irrelevant
71833 * until its first write fault, when page's anon_vma and index
71834@@ -2352,7 +2793,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
71835 if ((vma->vm_flags & VM_ACCOUNT) &&
71836 security_vm_enough_memory_mm(mm, vma_pages(vma)))
71837 return -ENOMEM;
71838+
71839+#ifdef CONFIG_PAX_SEGMEXEC
71840+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
71841+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71842+ if (!vma_m)
71843+ return -ENOMEM;
71844+ }
71845+#endif
71846+
71847 vma_link(mm, vma, prev, rb_link, rb_parent);
71848+
71849+#ifdef CONFIG_PAX_SEGMEXEC
71850+ if (vma_m)
71851+ BUG_ON(pax_mirror_vma(vma_m, vma));
71852+#endif
71853+
71854 return 0;
71855 }
71856
71857@@ -2371,6 +2827,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
71858 struct mempolicy *pol;
71859 bool faulted_in_anon_vma = true;
71860
71861+ BUG_ON(vma->vm_mirror);
71862+
71863 /*
71864 * If anonymous vma has not yet been faulted, update new pgoff
71865 * to match new location, to increase its chance of merging.
71866@@ -2438,6 +2896,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
71867 return NULL;
71868 }
71869
71870+#ifdef CONFIG_PAX_SEGMEXEC
71871+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
71872+{
71873+ struct vm_area_struct *prev_m;
71874+ struct rb_node **rb_link_m, *rb_parent_m;
71875+ struct mempolicy *pol_m;
71876+
71877+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
71878+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
71879+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
71880+ *vma_m = *vma;
71881+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
71882+ if (anon_vma_clone(vma_m, vma))
71883+ return -ENOMEM;
71884+ pol_m = vma_policy(vma_m);
71885+ mpol_get(pol_m);
71886+ vma_set_policy(vma_m, pol_m);
71887+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
71888+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
71889+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
71890+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
71891+ if (vma_m->vm_file)
71892+ get_file(vma_m->vm_file);
71893+ if (vma_m->vm_ops && vma_m->vm_ops->open)
71894+ vma_m->vm_ops->open(vma_m);
71895+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
71896+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
71897+ vma_m->vm_mirror = vma;
71898+ vma->vm_mirror = vma_m;
71899+ return 0;
71900+}
71901+#endif
71902+
71903 /*
71904 * Return true if the calling process may expand its vm space by the passed
71905 * number of pages
71906@@ -2449,6 +2940,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
71907
71908 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
71909
71910+#ifdef CONFIG_PAX_RANDMMAP
71911+ if (mm->pax_flags & MF_PAX_RANDMMAP)
71912+ cur -= mm->brk_gap;
71913+#endif
71914+
71915+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
71916 if (cur + npages > lim)
71917 return 0;
71918 return 1;
71919@@ -2519,6 +3016,22 @@ int install_special_mapping(struct mm_struct *mm,
71920 vma->vm_start = addr;
71921 vma->vm_end = addr + len;
71922
71923+#ifdef CONFIG_PAX_MPROTECT
71924+ if (mm->pax_flags & MF_PAX_MPROTECT) {
71925+#ifndef CONFIG_PAX_MPROTECT_COMPAT
71926+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
71927+ return -EPERM;
71928+ if (!(vm_flags & VM_EXEC))
71929+ vm_flags &= ~VM_MAYEXEC;
71930+#else
71931+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
71932+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
71933+#endif
71934+ else
71935+ vm_flags &= ~VM_MAYWRITE;
71936+ }
71937+#endif
71938+
71939 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
71940 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
71941
71942diff --git a/mm/mprotect.c b/mm/mprotect.c
71943index a409926..8b32e6d 100644
71944--- a/mm/mprotect.c
71945+++ b/mm/mprotect.c
71946@@ -23,10 +23,17 @@
71947 #include <linux/mmu_notifier.h>
71948 #include <linux/migrate.h>
71949 #include <linux/perf_event.h>
71950+
71951+#ifdef CONFIG_PAX_MPROTECT
71952+#include <linux/elf.h>
71953+#include <linux/binfmts.h>
71954+#endif
71955+
71956 #include <asm/uaccess.h>
71957 #include <asm/pgtable.h>
71958 #include <asm/cacheflush.h>
71959 #include <asm/tlbflush.h>
71960+#include <asm/mmu_context.h>
71961
71962 #ifndef pgprot_modify
71963 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
71964@@ -141,6 +148,48 @@ static void change_protection(struct vm_area_struct *vma,
71965 flush_tlb_range(vma, start, end);
71966 }
71967
71968+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
71969+/* called while holding the mmap semaphor for writing except stack expansion */
71970+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
71971+{
71972+ unsigned long oldlimit, newlimit = 0UL;
71973+
71974+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
71975+ return;
71976+
71977+ spin_lock(&mm->page_table_lock);
71978+ oldlimit = mm->context.user_cs_limit;
71979+ if ((prot & VM_EXEC) && oldlimit < end)
71980+ /* USER_CS limit moved up */
71981+ newlimit = end;
71982+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
71983+ /* USER_CS limit moved down */
71984+ newlimit = start;
71985+
71986+ if (newlimit) {
71987+ mm->context.user_cs_limit = newlimit;
71988+
71989+#ifdef CONFIG_SMP
71990+ wmb();
71991+ cpus_clear(mm->context.cpu_user_cs_mask);
71992+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
71993+#endif
71994+
71995+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
71996+ }
71997+ spin_unlock(&mm->page_table_lock);
71998+ if (newlimit == end) {
71999+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
72000+
72001+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
72002+ if (is_vm_hugetlb_page(vma))
72003+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
72004+ else
72005+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
72006+ }
72007+}
72008+#endif
72009+
72010 int
72011 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72012 unsigned long start, unsigned long end, unsigned long newflags)
72013@@ -153,11 +202,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72014 int error;
72015 int dirty_accountable = 0;
72016
72017+#ifdef CONFIG_PAX_SEGMEXEC
72018+ struct vm_area_struct *vma_m = NULL;
72019+ unsigned long start_m, end_m;
72020+
72021+ start_m = start + SEGMEXEC_TASK_SIZE;
72022+ end_m = end + SEGMEXEC_TASK_SIZE;
72023+#endif
72024+
72025 if (newflags == oldflags) {
72026 *pprev = vma;
72027 return 0;
72028 }
72029
72030+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
72031+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
72032+
72033+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
72034+ return -ENOMEM;
72035+
72036+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
72037+ return -ENOMEM;
72038+ }
72039+
72040 /*
72041 * If we make a private mapping writable we increase our commit;
72042 * but (without finer accounting) cannot reduce our commit if we
72043@@ -174,6 +241,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72044 }
72045 }
72046
72047+#ifdef CONFIG_PAX_SEGMEXEC
72048+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
72049+ if (start != vma->vm_start) {
72050+ error = split_vma(mm, vma, start, 1);
72051+ if (error)
72052+ goto fail;
72053+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
72054+ *pprev = (*pprev)->vm_next;
72055+ }
72056+
72057+ if (end != vma->vm_end) {
72058+ error = split_vma(mm, vma, end, 0);
72059+ if (error)
72060+ goto fail;
72061+ }
72062+
72063+ if (pax_find_mirror_vma(vma)) {
72064+ error = __do_munmap(mm, start_m, end_m - start_m);
72065+ if (error)
72066+ goto fail;
72067+ } else {
72068+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72069+ if (!vma_m) {
72070+ error = -ENOMEM;
72071+ goto fail;
72072+ }
72073+ vma->vm_flags = newflags;
72074+ error = pax_mirror_vma(vma_m, vma);
72075+ if (error) {
72076+ vma->vm_flags = oldflags;
72077+ goto fail;
72078+ }
72079+ }
72080+ }
72081+#endif
72082+
72083 /*
72084 * First try to merge with previous and/or next vma.
72085 */
72086@@ -204,9 +307,21 @@ success:
72087 * vm_flags and vm_page_prot are protected by the mmap_sem
72088 * held in write mode.
72089 */
72090+
72091+#ifdef CONFIG_PAX_SEGMEXEC
72092+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
72093+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
72094+#endif
72095+
72096 vma->vm_flags = newflags;
72097+
72098+#ifdef CONFIG_PAX_MPROTECT
72099+ if (mm->binfmt && mm->binfmt->handle_mprotect)
72100+ mm->binfmt->handle_mprotect(vma, newflags);
72101+#endif
72102+
72103 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
72104- vm_get_page_prot(newflags));
72105+ vm_get_page_prot(vma->vm_flags));
72106
72107 if (vma_wants_writenotify(vma)) {
72108 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
72109@@ -248,6 +363,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72110 end = start + len;
72111 if (end <= start)
72112 return -ENOMEM;
72113+
72114+#ifdef CONFIG_PAX_SEGMEXEC
72115+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
72116+ if (end > SEGMEXEC_TASK_SIZE)
72117+ return -EINVAL;
72118+ } else
72119+#endif
72120+
72121+ if (end > TASK_SIZE)
72122+ return -EINVAL;
72123+
72124 if (!arch_validate_prot(prot))
72125 return -EINVAL;
72126
72127@@ -255,7 +381,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72128 /*
72129 * Does the application expect PROT_READ to imply PROT_EXEC:
72130 */
72131- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
72132+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
72133 prot |= PROT_EXEC;
72134
72135 vm_flags = calc_vm_prot_bits(prot);
72136@@ -288,6 +414,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72137 if (start > vma->vm_start)
72138 prev = vma;
72139
72140+#ifdef CONFIG_PAX_MPROTECT
72141+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
72142+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
72143+#endif
72144+
72145 for (nstart = start ; ; ) {
72146 unsigned long newflags;
72147
72148@@ -297,6 +428,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72149
72150 /* newflags >> 4 shift VM_MAY% in place of VM_% */
72151 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
72152+ if (prot & (PROT_WRITE | PROT_EXEC))
72153+ gr_log_rwxmprotect(vma->vm_file);
72154+
72155+ error = -EACCES;
72156+ goto out;
72157+ }
72158+
72159+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
72160 error = -EACCES;
72161 goto out;
72162 }
72163@@ -311,6 +450,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72164 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
72165 if (error)
72166 goto out;
72167+
72168+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
72169+
72170 nstart = tmp;
72171
72172 if (nstart < prev->vm_end)
72173diff --git a/mm/mremap.c b/mm/mremap.c
72174index db8d983..76506cb 100644
72175--- a/mm/mremap.c
72176+++ b/mm/mremap.c
72177@@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
72178 continue;
72179 pte = ptep_get_and_clear(mm, old_addr, old_pte);
72180 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
72181+
72182+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72183+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
72184+ pte = pte_exprotect(pte);
72185+#endif
72186+
72187 set_pte_at(mm, new_addr, new_pte, pte);
72188 }
72189
72190@@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
72191 if (is_vm_hugetlb_page(vma))
72192 goto Einval;
72193
72194+#ifdef CONFIG_PAX_SEGMEXEC
72195+ if (pax_find_mirror_vma(vma))
72196+ goto Einval;
72197+#endif
72198+
72199 /* We can't remap across vm area boundaries */
72200 if (old_len > vma->vm_end - addr)
72201 goto Efault;
72202@@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned long addr,
72203 unsigned long ret = -EINVAL;
72204 unsigned long charged = 0;
72205 unsigned long map_flags;
72206+ unsigned long pax_task_size = TASK_SIZE;
72207
72208 if (new_addr & ~PAGE_MASK)
72209 goto out;
72210
72211- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
72212+#ifdef CONFIG_PAX_SEGMEXEC
72213+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
72214+ pax_task_size = SEGMEXEC_TASK_SIZE;
72215+#endif
72216+
72217+ pax_task_size -= PAGE_SIZE;
72218+
72219+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
72220 goto out;
72221
72222 /* Check if the location we're moving into overlaps the
72223 * old location at all, and fail if it does.
72224 */
72225- if ((new_addr <= addr) && (new_addr+new_len) > addr)
72226- goto out;
72227-
72228- if ((addr <= new_addr) && (addr+old_len) > new_addr)
72229+ if (addr + old_len > new_addr && new_addr + new_len > addr)
72230 goto out;
72231
72232 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72233@@ -440,6 +456,7 @@ unsigned long do_mremap(unsigned long addr,
72234 struct vm_area_struct *vma;
72235 unsigned long ret = -EINVAL;
72236 unsigned long charged = 0;
72237+ unsigned long pax_task_size = TASK_SIZE;
72238
72239 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
72240 goto out;
72241@@ -458,6 +475,17 @@ unsigned long do_mremap(unsigned long addr,
72242 if (!new_len)
72243 goto out;
72244
72245+#ifdef CONFIG_PAX_SEGMEXEC
72246+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
72247+ pax_task_size = SEGMEXEC_TASK_SIZE;
72248+#endif
72249+
72250+ pax_task_size -= PAGE_SIZE;
72251+
72252+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
72253+ old_len > pax_task_size || addr > pax_task_size-old_len)
72254+ goto out;
72255+
72256 if (flags & MREMAP_FIXED) {
72257 if (flags & MREMAP_MAYMOVE)
72258 ret = mremap_to(addr, old_len, new_addr, new_len);
72259@@ -507,6 +535,7 @@ unsigned long do_mremap(unsigned long addr,
72260 addr + new_len);
72261 }
72262 ret = addr;
72263+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
72264 goto out;
72265 }
72266 }
72267@@ -533,7 +562,13 @@ unsigned long do_mremap(unsigned long addr,
72268 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72269 if (ret)
72270 goto out;
72271+
72272+ map_flags = vma->vm_flags;
72273 ret = move_vma(vma, addr, old_len, new_len, new_addr);
72274+ if (!(ret & ~PAGE_MASK)) {
72275+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
72276+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
72277+ }
72278 }
72279 out:
72280 if (ret & ~PAGE_MASK)
72281diff --git a/mm/nommu.c b/mm/nommu.c
72282index bb8f4f0..40d3e02 100644
72283--- a/mm/nommu.c
72284+++ b/mm/nommu.c
72285@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
72286 int sysctl_overcommit_ratio = 50; /* default is 50% */
72287 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
72288 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
72289-int heap_stack_gap = 0;
72290
72291 atomic_long_t mmap_pages_allocated;
72292
72293@@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
72294 EXPORT_SYMBOL(find_vma);
72295
72296 /*
72297- * find a VMA
72298- * - we don't extend stack VMAs under NOMMU conditions
72299- */
72300-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
72301-{
72302- return find_vma(mm, addr);
72303-}
72304-
72305-/*
72306 * expand a stack to a given address
72307 * - not supported under NOMMU conditions
72308 */
72309@@ -1580,6 +1570,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72310
72311 /* most fields are the same, copy all, and then fixup */
72312 *new = *vma;
72313+ INIT_LIST_HEAD(&new->anon_vma_chain);
72314 *region = *vma->vm_region;
72315 new->vm_region = region;
72316
72317diff --git a/mm/page_alloc.c b/mm/page_alloc.c
72318index 918330f..ae99ae1 100644
72319--- a/mm/page_alloc.c
72320+++ b/mm/page_alloc.c
72321@@ -335,7 +335,7 @@ out:
72322 * This usage means that zero-order pages may not be compound.
72323 */
72324
72325-static void free_compound_page(struct page *page)
72326+void free_compound_page(struct page *page)
72327 {
72328 __free_pages_ok(page, compound_order(page));
72329 }
72330@@ -692,6 +692,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72331 int i;
72332 int bad = 0;
72333
72334+#ifdef CONFIG_PAX_MEMORY_SANITIZE
72335+ unsigned long index = 1UL << order;
72336+#endif
72337+
72338 trace_mm_page_free(page, order);
72339 kmemcheck_free_shadow(page, order);
72340
72341@@ -707,6 +711,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72342 debug_check_no_obj_freed(page_address(page),
72343 PAGE_SIZE << order);
72344 }
72345+
72346+#ifdef CONFIG_PAX_MEMORY_SANITIZE
72347+ for (; index; --index)
72348+ sanitize_highpage(page + index - 1);
72349+#endif
72350+
72351 arch_free_page(page, order);
72352 kernel_map_pages(page, 1 << order, 0);
72353
72354@@ -830,8 +840,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
72355 arch_alloc_page(page, order);
72356 kernel_map_pages(page, 1 << order, 1);
72357
72358+#ifndef CONFIG_PAX_MEMORY_SANITIZE
72359 if (gfp_flags & __GFP_ZERO)
72360 prep_zero_page(page, order, gfp_flags);
72361+#endif
72362
72363 if (order && (gfp_flags & __GFP_COMP))
72364 prep_compound_page(page, order);
72365@@ -3523,7 +3535,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
72366 unsigned long pfn;
72367
72368 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
72369+#ifdef CONFIG_X86_32
72370+ /* boot failures in VMware 8 on 32bit vanilla since
72371+ this change */
72372+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
72373+#else
72374 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
72375+#endif
72376 return 1;
72377 }
72378 return 0;
72379diff --git a/mm/percpu.c b/mm/percpu.c
72380index bb4be74..a43ea85 100644
72381--- a/mm/percpu.c
72382+++ b/mm/percpu.c
72383@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
72384 static unsigned int pcpu_high_unit_cpu __read_mostly;
72385
72386 /* the address of the first chunk which starts with the kernel static area */
72387-void *pcpu_base_addr __read_mostly;
72388+void *pcpu_base_addr __read_only;
72389 EXPORT_SYMBOL_GPL(pcpu_base_addr);
72390
72391 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
72392diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
72393index c20ff48..137702a 100644
72394--- a/mm/process_vm_access.c
72395+++ b/mm/process_vm_access.c
72396@@ -13,6 +13,7 @@
72397 #include <linux/uio.h>
72398 #include <linux/sched.h>
72399 #include <linux/highmem.h>
72400+#include <linux/security.h>
72401 #include <linux/ptrace.h>
72402 #include <linux/slab.h>
72403 #include <linux/syscalls.h>
72404@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
72405 size_t iov_l_curr_offset = 0;
72406 ssize_t iov_len;
72407
72408+ return -ENOSYS; // PaX: until properly audited
72409+
72410 /*
72411 * Work out how many pages of struct pages we're going to need
72412 * when eventually calling get_user_pages
72413 */
72414 for (i = 0; i < riovcnt; i++) {
72415 iov_len = rvec[i].iov_len;
72416- if (iov_len > 0) {
72417- nr_pages_iov = ((unsigned long)rvec[i].iov_base
72418- + iov_len)
72419- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
72420- / PAGE_SIZE + 1;
72421- nr_pages = max(nr_pages, nr_pages_iov);
72422- }
72423+ if (iov_len <= 0)
72424+ continue;
72425+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
72426+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
72427+ nr_pages = max(nr_pages, nr_pages_iov);
72428 }
72429
72430 if (nr_pages == 0)
72431@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
72432 goto free_proc_pages;
72433 }
72434
72435+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
72436+ rc = -EPERM;
72437+ goto put_task_struct;
72438+ }
72439+
72440 mm = mm_access(task, PTRACE_MODE_ATTACH);
72441 if (!mm || IS_ERR(mm)) {
72442 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
72443diff --git a/mm/rmap.c b/mm/rmap.c
72444index 5b5ad58..0f77903 100644
72445--- a/mm/rmap.c
72446+++ b/mm/rmap.c
72447@@ -167,6 +167,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72448 struct anon_vma *anon_vma = vma->anon_vma;
72449 struct anon_vma_chain *avc;
72450
72451+#ifdef CONFIG_PAX_SEGMEXEC
72452+ struct anon_vma_chain *avc_m = NULL;
72453+#endif
72454+
72455 might_sleep();
72456 if (unlikely(!anon_vma)) {
72457 struct mm_struct *mm = vma->vm_mm;
72458@@ -176,6 +180,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72459 if (!avc)
72460 goto out_enomem;
72461
72462+#ifdef CONFIG_PAX_SEGMEXEC
72463+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
72464+ if (!avc_m)
72465+ goto out_enomem_free_avc;
72466+#endif
72467+
72468 anon_vma = find_mergeable_anon_vma(vma);
72469 allocated = NULL;
72470 if (!anon_vma) {
72471@@ -189,6 +199,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72472 /* page_table_lock to protect against threads */
72473 spin_lock(&mm->page_table_lock);
72474 if (likely(!vma->anon_vma)) {
72475+
72476+#ifdef CONFIG_PAX_SEGMEXEC
72477+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
72478+
72479+ if (vma_m) {
72480+ BUG_ON(vma_m->anon_vma);
72481+ vma_m->anon_vma = anon_vma;
72482+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
72483+ avc_m = NULL;
72484+ }
72485+#endif
72486+
72487 vma->anon_vma = anon_vma;
72488 anon_vma_chain_link(vma, avc, anon_vma);
72489 allocated = NULL;
72490@@ -199,12 +221,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72491
72492 if (unlikely(allocated))
72493 put_anon_vma(allocated);
72494+
72495+#ifdef CONFIG_PAX_SEGMEXEC
72496+ if (unlikely(avc_m))
72497+ anon_vma_chain_free(avc_m);
72498+#endif
72499+
72500 if (unlikely(avc))
72501 anon_vma_chain_free(avc);
72502 }
72503 return 0;
72504
72505 out_enomem_free_avc:
72506+
72507+#ifdef CONFIG_PAX_SEGMEXEC
72508+ if (avc_m)
72509+ anon_vma_chain_free(avc_m);
72510+#endif
72511+
72512 anon_vma_chain_free(avc);
72513 out_enomem:
72514 return -ENOMEM;
72515@@ -240,7 +274,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
72516 * Attach the anon_vmas from src to dst.
72517 * Returns 0 on success, -ENOMEM on failure.
72518 */
72519-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
72520+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
72521 {
72522 struct anon_vma_chain *avc, *pavc;
72523 struct anon_vma *root = NULL;
72524@@ -318,7 +352,7 @@ void anon_vma_moveto_tail(struct vm_area_struct *dst)
72525 * the corresponding VMA in the parent process is attached to.
72526 * Returns 0 on success, non-zero on failure.
72527 */
72528-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
72529+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
72530 {
72531 struct anon_vma_chain *avc;
72532 struct anon_vma *anon_vma;
72533diff --git a/mm/shmem.c b/mm/shmem.c
72534index f99ff3e..faea8b6 100644
72535--- a/mm/shmem.c
72536+++ b/mm/shmem.c
72537@@ -31,7 +31,7 @@
72538 #include <linux/export.h>
72539 #include <linux/swap.h>
72540
72541-static struct vfsmount *shm_mnt;
72542+struct vfsmount *shm_mnt;
72543
72544 #ifdef CONFIG_SHMEM
72545 /*
72546@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
72547 #define BOGO_DIRENT_SIZE 20
72548
72549 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
72550-#define SHORT_SYMLINK_LEN 128
72551+#define SHORT_SYMLINK_LEN 64
72552
72553 struct shmem_xattr {
72554 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
72555@@ -2235,8 +2235,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
72556 int err = -ENOMEM;
72557
72558 /* Round up to L1_CACHE_BYTES to resist false sharing */
72559- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
72560- L1_CACHE_BYTES), GFP_KERNEL);
72561+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
72562 if (!sbinfo)
72563 return -ENOMEM;
72564
72565diff --git a/mm/slab.c b/mm/slab.c
72566index e901a36..ee8fe97 100644
72567--- a/mm/slab.c
72568+++ b/mm/slab.c
72569@@ -153,7 +153,7 @@
72570
72571 /* Legal flag mask for kmem_cache_create(). */
72572 #if DEBUG
72573-# define CREATE_MASK (SLAB_RED_ZONE | \
72574+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
72575 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
72576 SLAB_CACHE_DMA | \
72577 SLAB_STORE_USER | \
72578@@ -161,7 +161,7 @@
72579 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72580 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
72581 #else
72582-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
72583+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
72584 SLAB_CACHE_DMA | \
72585 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
72586 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72587@@ -290,7 +290,7 @@ struct kmem_list3 {
72588 * Need this for bootstrapping a per node allocator.
72589 */
72590 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
72591-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
72592+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
72593 #define CACHE_CACHE 0
72594 #define SIZE_AC MAX_NUMNODES
72595 #define SIZE_L3 (2 * MAX_NUMNODES)
72596@@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
72597 if ((x)->max_freeable < i) \
72598 (x)->max_freeable = i; \
72599 } while (0)
72600-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
72601-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
72602-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
72603-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
72604+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
72605+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
72606+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
72607+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
72608 #else
72609 #define STATS_INC_ACTIVE(x) do { } while (0)
72610 #define STATS_DEC_ACTIVE(x) do { } while (0)
72611@@ -542,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
72612 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
72613 */
72614 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
72615- const struct slab *slab, void *obj)
72616+ const struct slab *slab, const void *obj)
72617 {
72618 u32 offset = (obj - slab->s_mem);
72619 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
72620@@ -568,7 +568,7 @@ struct cache_names {
72621 static struct cache_names __initdata cache_names[] = {
72622 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
72623 #include <linux/kmalloc_sizes.h>
72624- {NULL,}
72625+ {NULL}
72626 #undef CACHE
72627 };
72628
72629@@ -1588,7 +1588,7 @@ void __init kmem_cache_init(void)
72630 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
72631 sizes[INDEX_AC].cs_size,
72632 ARCH_KMALLOC_MINALIGN,
72633- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72634+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72635 NULL);
72636
72637 if (INDEX_AC != INDEX_L3) {
72638@@ -1596,7 +1596,7 @@ void __init kmem_cache_init(void)
72639 kmem_cache_create(names[INDEX_L3].name,
72640 sizes[INDEX_L3].cs_size,
72641 ARCH_KMALLOC_MINALIGN,
72642- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72643+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72644 NULL);
72645 }
72646
72647@@ -1614,7 +1614,7 @@ void __init kmem_cache_init(void)
72648 sizes->cs_cachep = kmem_cache_create(names->name,
72649 sizes->cs_size,
72650 ARCH_KMALLOC_MINALIGN,
72651- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72652+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72653 NULL);
72654 }
72655 #ifdef CONFIG_ZONE_DMA
72656@@ -4390,10 +4390,10 @@ static int s_show(struct seq_file *m, void *p)
72657 }
72658 /* cpu stats */
72659 {
72660- unsigned long allochit = atomic_read(&cachep->allochit);
72661- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
72662- unsigned long freehit = atomic_read(&cachep->freehit);
72663- unsigned long freemiss = atomic_read(&cachep->freemiss);
72664+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
72665+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
72666+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
72667+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
72668
72669 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
72670 allochit, allocmiss, freehit, freemiss);
72671@@ -4652,13 +4652,62 @@ static int __init slab_proc_init(void)
72672 {
72673 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
72674 #ifdef CONFIG_DEBUG_SLAB_LEAK
72675- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
72676+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
72677 #endif
72678 return 0;
72679 }
72680 module_init(slab_proc_init);
72681 #endif
72682
72683+void check_object_size(const void *ptr, unsigned long n, bool to)
72684+{
72685+
72686+#ifdef CONFIG_PAX_USERCOPY
72687+ struct page *page;
72688+ struct kmem_cache *cachep = NULL;
72689+ struct slab *slabp;
72690+ unsigned int objnr;
72691+ unsigned long offset;
72692+ const char *type;
72693+
72694+ if (!n)
72695+ return;
72696+
72697+ type = "<null>";
72698+ if (ZERO_OR_NULL_PTR(ptr))
72699+ goto report;
72700+
72701+ if (!virt_addr_valid(ptr))
72702+ return;
72703+
72704+ page = virt_to_head_page(ptr);
72705+
72706+ type = "<process stack>";
72707+ if (!PageSlab(page)) {
72708+ if (object_is_on_stack(ptr, n) == -1)
72709+ goto report;
72710+ return;
72711+ }
72712+
72713+ cachep = page_get_cache(page);
72714+ type = cachep->name;
72715+ if (!(cachep->flags & SLAB_USERCOPY))
72716+ goto report;
72717+
72718+ slabp = page_get_slab(page);
72719+ objnr = obj_to_index(cachep, slabp, ptr);
72720+ BUG_ON(objnr >= cachep->num);
72721+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
72722+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
72723+ return;
72724+
72725+report:
72726+ pax_report_usercopy(ptr, n, to, type);
72727+#endif
72728+
72729+}
72730+EXPORT_SYMBOL(check_object_size);
72731+
72732 /**
72733 * ksize - get the actual amount of memory allocated for a given object
72734 * @objp: Pointer to the object
72735diff --git a/mm/slob.c b/mm/slob.c
72736index 8105be4..e045f96 100644
72737--- a/mm/slob.c
72738+++ b/mm/slob.c
72739@@ -29,7 +29,7 @@
72740 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
72741 * alloc_pages() directly, allocating compound pages so the page order
72742 * does not have to be separately tracked, and also stores the exact
72743- * allocation size in page->private so that it can be used to accurately
72744+ * allocation size in slob_page->size so that it can be used to accurately
72745 * provide ksize(). These objects are detected in kfree() because slob_page()
72746 * is false for them.
72747 *
72748@@ -58,6 +58,7 @@
72749 */
72750
72751 #include <linux/kernel.h>
72752+#include <linux/sched.h>
72753 #include <linux/slab.h>
72754 #include <linux/mm.h>
72755 #include <linux/swap.h> /* struct reclaim_state */
72756@@ -102,7 +103,8 @@ struct slob_page {
72757 unsigned long flags; /* mandatory */
72758 atomic_t _count; /* mandatory */
72759 slobidx_t units; /* free units left in page */
72760- unsigned long pad[2];
72761+ unsigned long pad[1];
72762+ unsigned long size; /* size when >=PAGE_SIZE */
72763 slob_t *free; /* first free slob_t in page */
72764 struct list_head list; /* linked list of free pages */
72765 };
72766@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
72767 */
72768 static inline int is_slob_page(struct slob_page *sp)
72769 {
72770- return PageSlab((struct page *)sp);
72771+ return PageSlab((struct page *)sp) && !sp->size;
72772 }
72773
72774 static inline void set_slob_page(struct slob_page *sp)
72775@@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
72776
72777 static inline struct slob_page *slob_page(const void *addr)
72778 {
72779- return (struct slob_page *)virt_to_page(addr);
72780+ return (struct slob_page *)virt_to_head_page(addr);
72781 }
72782
72783 /*
72784@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
72785 /*
72786 * Return the size of a slob block.
72787 */
72788-static slobidx_t slob_units(slob_t *s)
72789+static slobidx_t slob_units(const slob_t *s)
72790 {
72791 if (s->units > 0)
72792 return s->units;
72793@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
72794 /*
72795 * Return the next free slob block pointer after this one.
72796 */
72797-static slob_t *slob_next(slob_t *s)
72798+static slob_t *slob_next(const slob_t *s)
72799 {
72800 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
72801 slobidx_t next;
72802@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
72803 /*
72804 * Returns true if s is the last free block in its page.
72805 */
72806-static int slob_last(slob_t *s)
72807+static int slob_last(const slob_t *s)
72808 {
72809 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
72810 }
72811@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
72812 if (!page)
72813 return NULL;
72814
72815+ set_slob_page(page);
72816 return page_address(page);
72817 }
72818
72819@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
72820 if (!b)
72821 return NULL;
72822 sp = slob_page(b);
72823- set_slob_page(sp);
72824
72825 spin_lock_irqsave(&slob_lock, flags);
72826 sp->units = SLOB_UNITS(PAGE_SIZE);
72827 sp->free = b;
72828+ sp->size = 0;
72829 INIT_LIST_HEAD(&sp->list);
72830 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
72831 set_slob_page_free(sp, slob_list);
72832@@ -476,10 +479,9 @@ out:
72833 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
72834 */
72835
72836-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72837+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
72838 {
72839- unsigned int *m;
72840- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72841+ slob_t *m;
72842 void *ret;
72843
72844 gfp &= gfp_allowed_mask;
72845@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72846
72847 if (!m)
72848 return NULL;
72849- *m = size;
72850+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
72851+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
72852+ m[0].units = size;
72853+ m[1].units = align;
72854 ret = (void *)m + align;
72855
72856 trace_kmalloc_node(_RET_IP_, ret,
72857@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72858 gfp |= __GFP_COMP;
72859 ret = slob_new_pages(gfp, order, node);
72860 if (ret) {
72861- struct page *page;
72862- page = virt_to_page(ret);
72863- page->private = size;
72864+ struct slob_page *sp;
72865+ sp = slob_page(ret);
72866+ sp->size = size;
72867 }
72868
72869 trace_kmalloc_node(_RET_IP_, ret,
72870 size, PAGE_SIZE << order, gfp, node);
72871 }
72872
72873- kmemleak_alloc(ret, size, 1, gfp);
72874+ return ret;
72875+}
72876+
72877+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72878+{
72879+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72880+ void *ret = __kmalloc_node_align(size, gfp, node, align);
72881+
72882+ if (!ZERO_OR_NULL_PTR(ret))
72883+ kmemleak_alloc(ret, size, 1, gfp);
72884 return ret;
72885 }
72886 EXPORT_SYMBOL(__kmalloc_node);
72887@@ -533,13 +547,92 @@ void kfree(const void *block)
72888 sp = slob_page(block);
72889 if (is_slob_page(sp)) {
72890 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72891- unsigned int *m = (unsigned int *)(block - align);
72892- slob_free(m, *m + align);
72893- } else
72894+ slob_t *m = (slob_t *)(block - align);
72895+ slob_free(m, m[0].units + align);
72896+ } else {
72897+ clear_slob_page(sp);
72898+ free_slob_page(sp);
72899+ sp->size = 0;
72900 put_page(&sp->page);
72901+ }
72902 }
72903 EXPORT_SYMBOL(kfree);
72904
72905+void check_object_size(const void *ptr, unsigned long n, bool to)
72906+{
72907+
72908+#ifdef CONFIG_PAX_USERCOPY
72909+ struct slob_page *sp;
72910+ const slob_t *free;
72911+ const void *base;
72912+ unsigned long flags;
72913+ const char *type;
72914+
72915+ if (!n)
72916+ return;
72917+
72918+ type = "<null>";
72919+ if (ZERO_OR_NULL_PTR(ptr))
72920+ goto report;
72921+
72922+ if (!virt_addr_valid(ptr))
72923+ return;
72924+
72925+ type = "<process stack>";
72926+ sp = slob_page(ptr);
72927+ if (!PageSlab((struct page *)sp)) {
72928+ if (object_is_on_stack(ptr, n) == -1)
72929+ goto report;
72930+ return;
72931+ }
72932+
72933+ type = "<slob>";
72934+ if (sp->size) {
72935+ base = page_address(&sp->page);
72936+ if (base <= ptr && n <= sp->size - (ptr - base))
72937+ return;
72938+ goto report;
72939+ }
72940+
72941+ /* some tricky double walking to find the chunk */
72942+ spin_lock_irqsave(&slob_lock, flags);
72943+ base = (void *)((unsigned long)ptr & PAGE_MASK);
72944+ free = sp->free;
72945+
72946+ while (!slob_last(free) && (void *)free <= ptr) {
72947+ base = free + slob_units(free);
72948+ free = slob_next(free);
72949+ }
72950+
72951+ while (base < (void *)free) {
72952+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
72953+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
72954+ int offset;
72955+
72956+ if (ptr < base + align)
72957+ break;
72958+
72959+ offset = ptr - base - align;
72960+ if (offset >= m) {
72961+ base += size;
72962+ continue;
72963+ }
72964+
72965+ if (n > m - offset)
72966+ break;
72967+
72968+ spin_unlock_irqrestore(&slob_lock, flags);
72969+ return;
72970+ }
72971+
72972+ spin_unlock_irqrestore(&slob_lock, flags);
72973+report:
72974+ pax_report_usercopy(ptr, n, to, type);
72975+#endif
72976+
72977+}
72978+EXPORT_SYMBOL(check_object_size);
72979+
72980 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
72981 size_t ksize(const void *block)
72982 {
72983@@ -552,10 +645,10 @@ size_t ksize(const void *block)
72984 sp = slob_page(block);
72985 if (is_slob_page(sp)) {
72986 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72987- unsigned int *m = (unsigned int *)(block - align);
72988- return SLOB_UNITS(*m) * SLOB_UNIT;
72989+ slob_t *m = (slob_t *)(block - align);
72990+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
72991 } else
72992- return sp->page.private;
72993+ return sp->size;
72994 }
72995 EXPORT_SYMBOL(ksize);
72996
72997@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
72998 {
72999 struct kmem_cache *c;
73000
73001+#ifdef CONFIG_PAX_USERCOPY
73002+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
73003+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
73004+#else
73005 c = slob_alloc(sizeof(struct kmem_cache),
73006 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
73007+#endif
73008
73009 if (c) {
73010 c->name = name;
73011@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
73012
73013 lockdep_trace_alloc(flags);
73014
73015+#ifdef CONFIG_PAX_USERCOPY
73016+ b = __kmalloc_node_align(c->size, flags, node, c->align);
73017+#else
73018 if (c->size < PAGE_SIZE) {
73019 b = slob_alloc(c->size, flags, c->align, node);
73020 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73021 SLOB_UNITS(c->size) * SLOB_UNIT,
73022 flags, node);
73023 } else {
73024+ struct slob_page *sp;
73025+
73026 b = slob_new_pages(flags, get_order(c->size), node);
73027+ sp = slob_page(b);
73028+ sp->size = c->size;
73029 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73030 PAGE_SIZE << get_order(c->size),
73031 flags, node);
73032 }
73033+#endif
73034
73035 if (c->ctor)
73036 c->ctor(b);
73037@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
73038
73039 static void __kmem_cache_free(void *b, int size)
73040 {
73041- if (size < PAGE_SIZE)
73042+ struct slob_page *sp = slob_page(b);
73043+
73044+ if (is_slob_page(sp))
73045 slob_free(b, size);
73046- else
73047+ else {
73048+ clear_slob_page(sp);
73049+ free_slob_page(sp);
73050+ sp->size = 0;
73051 slob_free_pages(b, get_order(size));
73052+ }
73053 }
73054
73055 static void kmem_rcu_free(struct rcu_head *head)
73056@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
73057
73058 void kmem_cache_free(struct kmem_cache *c, void *b)
73059 {
73060+ int size = c->size;
73061+
73062+#ifdef CONFIG_PAX_USERCOPY
73063+ if (size + c->align < PAGE_SIZE) {
73064+ size += c->align;
73065+ b -= c->align;
73066+ }
73067+#endif
73068+
73069 kmemleak_free_recursive(b, c->flags);
73070 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
73071 struct slob_rcu *slob_rcu;
73072- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
73073- slob_rcu->size = c->size;
73074+ slob_rcu = b + (size - sizeof(struct slob_rcu));
73075+ slob_rcu->size = size;
73076 call_rcu(&slob_rcu->head, kmem_rcu_free);
73077 } else {
73078- __kmem_cache_free(b, c->size);
73079+ __kmem_cache_free(b, size);
73080 }
73081
73082+#ifdef CONFIG_PAX_USERCOPY
73083+ trace_kfree(_RET_IP_, b);
73084+#else
73085 trace_kmem_cache_free(_RET_IP_, b);
73086+#endif
73087+
73088 }
73089 EXPORT_SYMBOL(kmem_cache_free);
73090
73091diff --git a/mm/slub.c b/mm/slub.c
73092index 71de9b5..dd263c5 100644
73093--- a/mm/slub.c
73094+++ b/mm/slub.c
73095@@ -209,7 +209,7 @@ struct track {
73096
73097 enum track_item { TRACK_ALLOC, TRACK_FREE };
73098
73099-#ifdef CONFIG_SYSFS
73100+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73101 static int sysfs_slab_add(struct kmem_cache *);
73102 static int sysfs_slab_alias(struct kmem_cache *, const char *);
73103 static void sysfs_slab_remove(struct kmem_cache *);
73104@@ -538,7 +538,7 @@ static void print_track(const char *s, struct track *t)
73105 if (!t->addr)
73106 return;
73107
73108- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
73109+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
73110 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
73111 #ifdef CONFIG_STACKTRACE
73112 {
73113@@ -2603,6 +2603,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
73114
73115 page = virt_to_head_page(x);
73116
73117+ BUG_ON(!PageSlab(page));
73118+
73119 slab_free(s, page, x, _RET_IP_);
73120
73121 trace_kmem_cache_free(_RET_IP_, x);
73122@@ -2636,7 +2638,7 @@ static int slub_min_objects;
73123 * Merge control. If this is set then no merging of slab caches will occur.
73124 * (Could be removed. This was introduced to pacify the merge skeptics.)
73125 */
73126-static int slub_nomerge;
73127+static int slub_nomerge = 1;
73128
73129 /*
73130 * Calculate the order of allocation given an slab object size.
73131@@ -3089,7 +3091,7 @@ static int kmem_cache_open(struct kmem_cache *s,
73132 else
73133 s->cpu_partial = 30;
73134
73135- s->refcount = 1;
73136+ atomic_set(&s->refcount, 1);
73137 #ifdef CONFIG_NUMA
73138 s->remote_node_defrag_ratio = 1000;
73139 #endif
73140@@ -3193,8 +3195,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
73141 void kmem_cache_destroy(struct kmem_cache *s)
73142 {
73143 down_write(&slub_lock);
73144- s->refcount--;
73145- if (!s->refcount) {
73146+ if (atomic_dec_and_test(&s->refcount)) {
73147 list_del(&s->list);
73148 up_write(&slub_lock);
73149 if (kmem_cache_close(s)) {
73150@@ -3405,6 +3406,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
73151 EXPORT_SYMBOL(__kmalloc_node);
73152 #endif
73153
73154+void check_object_size(const void *ptr, unsigned long n, bool to)
73155+{
73156+
73157+#ifdef CONFIG_PAX_USERCOPY
73158+ struct page *page;
73159+ struct kmem_cache *s = NULL;
73160+ unsigned long offset;
73161+ const char *type;
73162+
73163+ if (!n)
73164+ return;
73165+
73166+ type = "<null>";
73167+ if (ZERO_OR_NULL_PTR(ptr))
73168+ goto report;
73169+
73170+ if (!virt_addr_valid(ptr))
73171+ return;
73172+
73173+ page = virt_to_head_page(ptr);
73174+
73175+ type = "<process stack>";
73176+ if (!PageSlab(page)) {
73177+ if (object_is_on_stack(ptr, n) == -1)
73178+ goto report;
73179+ return;
73180+ }
73181+
73182+ s = page->slab;
73183+ type = s->name;
73184+ if (!(s->flags & SLAB_USERCOPY))
73185+ goto report;
73186+
73187+ offset = (ptr - page_address(page)) % s->size;
73188+ if (offset <= s->objsize && n <= s->objsize - offset)
73189+ return;
73190+
73191+report:
73192+ pax_report_usercopy(ptr, n, to, type);
73193+#endif
73194+
73195+}
73196+EXPORT_SYMBOL(check_object_size);
73197+
73198 size_t ksize(const void *object)
73199 {
73200 struct page *page;
73201@@ -3679,7 +3724,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
73202 int node;
73203
73204 list_add(&s->list, &slab_caches);
73205- s->refcount = -1;
73206+ atomic_set(&s->refcount, -1);
73207
73208 for_each_node_state(node, N_NORMAL_MEMORY) {
73209 struct kmem_cache_node *n = get_node(s, node);
73210@@ -3799,17 +3844,17 @@ void __init kmem_cache_init(void)
73211
73212 /* Caches that are not of the two-to-the-power-of size */
73213 if (KMALLOC_MIN_SIZE <= 32) {
73214- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
73215+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
73216 caches++;
73217 }
73218
73219 if (KMALLOC_MIN_SIZE <= 64) {
73220- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
73221+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
73222 caches++;
73223 }
73224
73225 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
73226- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
73227+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
73228 caches++;
73229 }
73230
73231@@ -3877,7 +3922,7 @@ static int slab_unmergeable(struct kmem_cache *s)
73232 /*
73233 * We may have set a slab to be unmergeable during bootstrap.
73234 */
73235- if (s->refcount < 0)
73236+ if (atomic_read(&s->refcount) < 0)
73237 return 1;
73238
73239 return 0;
73240@@ -3936,7 +3981,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73241 down_write(&slub_lock);
73242 s = find_mergeable(size, align, flags, name, ctor);
73243 if (s) {
73244- s->refcount++;
73245+ atomic_inc(&s->refcount);
73246 /*
73247 * Adjust the object sizes so that we clear
73248 * the complete object on kzalloc.
73249@@ -3945,7 +3990,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73250 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
73251
73252 if (sysfs_slab_alias(s, name)) {
73253- s->refcount--;
73254+ atomic_dec(&s->refcount);
73255 goto err;
73256 }
73257 up_write(&slub_lock);
73258@@ -4074,7 +4119,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
73259 }
73260 #endif
73261
73262-#ifdef CONFIG_SYSFS
73263+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73264 static int count_inuse(struct page *page)
73265 {
73266 return page->inuse;
73267@@ -4461,12 +4506,12 @@ static void resiliency_test(void)
73268 validate_slab_cache(kmalloc_caches[9]);
73269 }
73270 #else
73271-#ifdef CONFIG_SYSFS
73272+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73273 static void resiliency_test(void) {};
73274 #endif
73275 #endif
73276
73277-#ifdef CONFIG_SYSFS
73278+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73279 enum slab_stat_type {
73280 SL_ALL, /* All slabs */
73281 SL_PARTIAL, /* Only partially allocated slabs */
73282@@ -4709,7 +4754,7 @@ SLAB_ATTR_RO(ctor);
73283
73284 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
73285 {
73286- return sprintf(buf, "%d\n", s->refcount - 1);
73287+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
73288 }
73289 SLAB_ATTR_RO(aliases);
73290
73291@@ -5280,6 +5325,7 @@ static char *create_unique_id(struct kmem_cache *s)
73292 return name;
73293 }
73294
73295+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73296 static int sysfs_slab_add(struct kmem_cache *s)
73297 {
73298 int err;
73299@@ -5342,6 +5388,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
73300 kobject_del(&s->kobj);
73301 kobject_put(&s->kobj);
73302 }
73303+#endif
73304
73305 /*
73306 * Need to buffer aliases during bootup until sysfs becomes
73307@@ -5355,6 +5402,7 @@ struct saved_alias {
73308
73309 static struct saved_alias *alias_list;
73310
73311+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73312 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73313 {
73314 struct saved_alias *al;
73315@@ -5377,6 +5425,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73316 alias_list = al;
73317 return 0;
73318 }
73319+#endif
73320
73321 static int __init slab_sysfs_init(void)
73322 {
73323diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
73324index 1b7e22a..3fcd4f3 100644
73325--- a/mm/sparse-vmemmap.c
73326+++ b/mm/sparse-vmemmap.c
73327@@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
73328 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
73329 if (!p)
73330 return NULL;
73331- pud_populate(&init_mm, pud, p);
73332+ pud_populate_kernel(&init_mm, pud, p);
73333 }
73334 return pud;
73335 }
73336@@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
73337 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
73338 if (!p)
73339 return NULL;
73340- pgd_populate(&init_mm, pgd, p);
73341+ pgd_populate_kernel(&init_mm, pgd, p);
73342 }
73343 return pgd;
73344 }
73345diff --git a/mm/swap.c b/mm/swap.c
73346index 5c13f13..f1cfc13 100644
73347--- a/mm/swap.c
73348+++ b/mm/swap.c
73349@@ -30,6 +30,7 @@
73350 #include <linux/backing-dev.h>
73351 #include <linux/memcontrol.h>
73352 #include <linux/gfp.h>
73353+#include <linux/hugetlb.h>
73354
73355 #include "internal.h"
73356
73357@@ -70,6 +71,8 @@ static void __put_compound_page(struct page *page)
73358
73359 __page_cache_release(page);
73360 dtor = get_compound_page_dtor(page);
73361+ if (!PageHuge(page))
73362+ BUG_ON(dtor != free_compound_page);
73363 (*dtor)(page);
73364 }
73365
73366diff --git a/mm/swapfile.c b/mm/swapfile.c
73367index fafc26d..1b7493e 100644
73368--- a/mm/swapfile.c
73369+++ b/mm/swapfile.c
73370@@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
73371
73372 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
73373 /* Activity counter to indicate that a swapon or swapoff has occurred */
73374-static atomic_t proc_poll_event = ATOMIC_INIT(0);
73375+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
73376
73377 static inline unsigned char swap_count(unsigned char ent)
73378 {
73379@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
73380 }
73381 filp_close(swap_file, NULL);
73382 err = 0;
73383- atomic_inc(&proc_poll_event);
73384+ atomic_inc_unchecked(&proc_poll_event);
73385 wake_up_interruptible(&proc_poll_wait);
73386
73387 out_dput:
73388@@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
73389
73390 poll_wait(file, &proc_poll_wait, wait);
73391
73392- if (seq->poll_event != atomic_read(&proc_poll_event)) {
73393- seq->poll_event = atomic_read(&proc_poll_event);
73394+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
73395+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73396 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
73397 }
73398
73399@@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inode, struct file *file)
73400 return ret;
73401
73402 seq = file->private_data;
73403- seq->poll_event = atomic_read(&proc_poll_event);
73404+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73405 return 0;
73406 }
73407
73408@@ -2127,7 +2127,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
73409 (p->flags & SWP_DISCARDABLE) ? "D" : "");
73410
73411 mutex_unlock(&swapon_mutex);
73412- atomic_inc(&proc_poll_event);
73413+ atomic_inc_unchecked(&proc_poll_event);
73414 wake_up_interruptible(&proc_poll_wait);
73415
73416 if (S_ISREG(inode->i_mode))
73417diff --git a/mm/util.c b/mm/util.c
73418index ae962b3..0bba886 100644
73419--- a/mm/util.c
73420+++ b/mm/util.c
73421@@ -284,6 +284,12 @@ done:
73422 void arch_pick_mmap_layout(struct mm_struct *mm)
73423 {
73424 mm->mmap_base = TASK_UNMAPPED_BASE;
73425+
73426+#ifdef CONFIG_PAX_RANDMMAP
73427+ if (mm->pax_flags & MF_PAX_RANDMMAP)
73428+ mm->mmap_base += mm->delta_mmap;
73429+#endif
73430+
73431 mm->get_unmapped_area = arch_get_unmapped_area;
73432 mm->unmap_area = arch_unmap_area;
73433 }
73434diff --git a/mm/vmalloc.c b/mm/vmalloc.c
73435index 1196c77..2e608e8 100644
73436--- a/mm/vmalloc.c
73437+++ b/mm/vmalloc.c
73438@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
73439
73440 pte = pte_offset_kernel(pmd, addr);
73441 do {
73442- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
73443- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
73444+
73445+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73446+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
73447+ BUG_ON(!pte_exec(*pte));
73448+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
73449+ continue;
73450+ }
73451+#endif
73452+
73453+ {
73454+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
73455+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
73456+ }
73457 } while (pte++, addr += PAGE_SIZE, addr != end);
73458 }
73459
73460@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
73461 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
73462 {
73463 pte_t *pte;
73464+ int ret = -ENOMEM;
73465
73466 /*
73467 * nr is a running index into the array which helps higher level
73468@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
73469 pte = pte_alloc_kernel(pmd, addr);
73470 if (!pte)
73471 return -ENOMEM;
73472+
73473+ pax_open_kernel();
73474 do {
73475 struct page *page = pages[*nr];
73476
73477- if (WARN_ON(!pte_none(*pte)))
73478- return -EBUSY;
73479- if (WARN_ON(!page))
73480- return -ENOMEM;
73481+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73482+ if (pgprot_val(prot) & _PAGE_NX)
73483+#endif
73484+
73485+ if (WARN_ON(!pte_none(*pte))) {
73486+ ret = -EBUSY;
73487+ goto out;
73488+ }
73489+ if (WARN_ON(!page)) {
73490+ ret = -ENOMEM;
73491+ goto out;
73492+ }
73493 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
73494 (*nr)++;
73495 } while (pte++, addr += PAGE_SIZE, addr != end);
73496- return 0;
73497+ ret = 0;
73498+out:
73499+ pax_close_kernel();
73500+ return ret;
73501 }
73502
73503 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
73504@@ -119,7 +144,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
73505 pmd_t *pmd;
73506 unsigned long next;
73507
73508- pmd = pmd_alloc(&init_mm, pud, addr);
73509+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
73510 if (!pmd)
73511 return -ENOMEM;
73512 do {
73513@@ -136,7 +161,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
73514 pud_t *pud;
73515 unsigned long next;
73516
73517- pud = pud_alloc(&init_mm, pgd, addr);
73518+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
73519 if (!pud)
73520 return -ENOMEM;
73521 do {
73522@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
73523 * and fall back on vmalloc() if that fails. Others
73524 * just put it in the vmalloc space.
73525 */
73526-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
73527+#ifdef CONFIG_MODULES
73528+#ifdef MODULES_VADDR
73529 unsigned long addr = (unsigned long)x;
73530 if (addr >= MODULES_VADDR && addr < MODULES_END)
73531 return 1;
73532 #endif
73533+
73534+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73535+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
73536+ return 1;
73537+#endif
73538+
73539+#endif
73540+
73541 return is_vmalloc_addr(x);
73542 }
73543
73544@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
73545
73546 if (!pgd_none(*pgd)) {
73547 pud_t *pud = pud_offset(pgd, addr);
73548+#ifdef CONFIG_X86
73549+ if (!pud_large(*pud))
73550+#endif
73551 if (!pud_none(*pud)) {
73552 pmd_t *pmd = pmd_offset(pud, addr);
73553+#ifdef CONFIG_X86
73554+ if (!pmd_large(*pmd))
73555+#endif
73556 if (!pmd_none(*pmd)) {
73557 pte_t *ptep, pte;
73558
73559@@ -332,6 +372,10 @@ static void purge_vmap_area_lazy(void);
73560 static struct vmap_area *alloc_vmap_area(unsigned long size,
73561 unsigned long align,
73562 unsigned long vstart, unsigned long vend,
73563+ int node, gfp_t gfp_mask) __size_overflow(1);
73564+static struct vmap_area *alloc_vmap_area(unsigned long size,
73565+ unsigned long align,
73566+ unsigned long vstart, unsigned long vend,
73567 int node, gfp_t gfp_mask)
73568 {
73569 struct vmap_area *va;
73570@@ -1320,6 +1364,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
73571 struct vm_struct *area;
73572
73573 BUG_ON(in_interrupt());
73574+
73575+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73576+ if (flags & VM_KERNEXEC) {
73577+ if (start != VMALLOC_START || end != VMALLOC_END)
73578+ return NULL;
73579+ start = (unsigned long)MODULES_EXEC_VADDR;
73580+ end = (unsigned long)MODULES_EXEC_END;
73581+ }
73582+#endif
73583+
73584 if (flags & VM_IOREMAP) {
73585 int bit = fls(size);
73586
73587@@ -1552,6 +1606,11 @@ void *vmap(struct page **pages, unsigned int count,
73588 if (count > totalram_pages)
73589 return NULL;
73590
73591+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73592+ if (!(pgprot_val(prot) & _PAGE_NX))
73593+ flags |= VM_KERNEXEC;
73594+#endif
73595+
73596 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
73597 __builtin_return_address(0));
73598 if (!area)
73599@@ -1653,6 +1712,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
73600 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
73601 goto fail;
73602
73603+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73604+ if (!(pgprot_val(prot) & _PAGE_NX))
73605+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
73606+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
73607+ else
73608+#endif
73609+
73610 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
73611 start, end, node, gfp_mask, caller);
73612 if (!area)
73613@@ -1826,10 +1892,9 @@ EXPORT_SYMBOL(vzalloc_node);
73614 * For tight control over page level allocator and protection flags
73615 * use __vmalloc() instead.
73616 */
73617-
73618 void *vmalloc_exec(unsigned long size)
73619 {
73620- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
73621+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
73622 -1, __builtin_return_address(0));
73623 }
73624
73625@@ -2124,6 +2189,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
73626 unsigned long uaddr = vma->vm_start;
73627 unsigned long usize = vma->vm_end - vma->vm_start;
73628
73629+ BUG_ON(vma->vm_mirror);
73630+
73631 if ((PAGE_SIZE-1) & (unsigned long)addr)
73632 return -EINVAL;
73633
73634@@ -2376,8 +2443,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
73635 return NULL;
73636 }
73637
73638- vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
73639- vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
73640+ vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
73641+ vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
73642 if (!vas || !vms)
73643 goto err_free2;
73644
73645diff --git a/mm/vmstat.c b/mm/vmstat.c
73646index 7db1b9b..e9f6b07 100644
73647--- a/mm/vmstat.c
73648+++ b/mm/vmstat.c
73649@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
73650 *
73651 * vm_stat contains the global counters
73652 */
73653-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
73654+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
73655 EXPORT_SYMBOL(vm_stat);
73656
73657 #ifdef CONFIG_SMP
73658@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
73659 v = p->vm_stat_diff[i];
73660 p->vm_stat_diff[i] = 0;
73661 local_irq_restore(flags);
73662- atomic_long_add(v, &zone->vm_stat[i]);
73663+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
73664 global_diff[i] += v;
73665 #ifdef CONFIG_NUMA
73666 /* 3 seconds idle till flush */
73667@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
73668
73669 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
73670 if (global_diff[i])
73671- atomic_long_add(global_diff[i], &vm_stat[i]);
73672+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
73673 }
73674
73675 #endif
73676@@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
73677 start_cpu_timer(cpu);
73678 #endif
73679 #ifdef CONFIG_PROC_FS
73680- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
73681- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
73682- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
73683- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
73684+ {
73685+ mode_t gr_mode = S_IRUGO;
73686+#ifdef CONFIG_GRKERNSEC_PROC_ADD
73687+ gr_mode = S_IRUSR;
73688+#endif
73689+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
73690+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
73691+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
73692+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
73693+#else
73694+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
73695+#endif
73696+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
73697+ }
73698 #endif
73699 return 0;
73700 }
73701diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
73702index efea35b..9c8dd0b 100644
73703--- a/net/8021q/vlan.c
73704+++ b/net/8021q/vlan.c
73705@@ -554,8 +554,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
73706 err = -EPERM;
73707 if (!capable(CAP_NET_ADMIN))
73708 break;
73709- if ((args.u.name_type >= 0) &&
73710- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
73711+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
73712 struct vlan_net *vn;
73713
73714 vn = net_generic(net, vlan_net_id);
73715diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
73716index fccae26..e7ece2f 100644
73717--- a/net/9p/trans_fd.c
73718+++ b/net/9p/trans_fd.c
73719@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
73720 oldfs = get_fs();
73721 set_fs(get_ds());
73722 /* The cast to a user pointer is valid due to the set_fs() */
73723- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
73724+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
73725 set_fs(oldfs);
73726
73727 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
73728diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
73729index 876fbe8..8bbea9f 100644
73730--- a/net/atm/atm_misc.c
73731+++ b/net/atm/atm_misc.c
73732@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
73733 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
73734 return 1;
73735 atm_return(vcc, truesize);
73736- atomic_inc(&vcc->stats->rx_drop);
73737+ atomic_inc_unchecked(&vcc->stats->rx_drop);
73738 return 0;
73739 }
73740 EXPORT_SYMBOL(atm_charge);
73741@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
73742 }
73743 }
73744 atm_return(vcc, guess);
73745- atomic_inc(&vcc->stats->rx_drop);
73746+ atomic_inc_unchecked(&vcc->stats->rx_drop);
73747 return NULL;
73748 }
73749 EXPORT_SYMBOL(atm_alloc_charge);
73750@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
73751
73752 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
73753 {
73754-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
73755+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
73756 __SONET_ITEMS
73757 #undef __HANDLE_ITEM
73758 }
73759@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
73760
73761 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
73762 {
73763-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
73764+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
73765 __SONET_ITEMS
73766 #undef __HANDLE_ITEM
73767 }
73768diff --git a/net/atm/lec.h b/net/atm/lec.h
73769index dfc0719..47c5322 100644
73770--- a/net/atm/lec.h
73771+++ b/net/atm/lec.h
73772@@ -48,7 +48,7 @@ struct lane2_ops {
73773 const u8 *tlvs, u32 sizeoftlvs);
73774 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
73775 const u8 *tlvs, u32 sizeoftlvs);
73776-};
73777+} __no_const;
73778
73779 /*
73780 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
73781diff --git a/net/atm/mpc.h b/net/atm/mpc.h
73782index 0919a88..a23d54e 100644
73783--- a/net/atm/mpc.h
73784+++ b/net/atm/mpc.h
73785@@ -33,7 +33,7 @@ struct mpoa_client {
73786 struct mpc_parameters parameters; /* parameters for this client */
73787
73788 const struct net_device_ops *old_ops;
73789- struct net_device_ops new_ops;
73790+ net_device_ops_no_const new_ops;
73791 };
73792
73793
73794diff --git a/net/atm/proc.c b/net/atm/proc.c
73795index 0d020de..011c7bb 100644
73796--- a/net/atm/proc.c
73797+++ b/net/atm/proc.c
73798@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
73799 const struct k_atm_aal_stats *stats)
73800 {
73801 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
73802- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
73803- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
73804- atomic_read(&stats->rx_drop));
73805+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
73806+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
73807+ atomic_read_unchecked(&stats->rx_drop));
73808 }
73809
73810 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
73811diff --git a/net/atm/resources.c b/net/atm/resources.c
73812index 23f45ce..c748f1a 100644
73813--- a/net/atm/resources.c
73814+++ b/net/atm/resources.c
73815@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
73816 static void copy_aal_stats(struct k_atm_aal_stats *from,
73817 struct atm_aal_stats *to)
73818 {
73819-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
73820+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
73821 __AAL_STAT_ITEMS
73822 #undef __HANDLE_ITEM
73823 }
73824@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
73825 static void subtract_aal_stats(struct k_atm_aal_stats *from,
73826 struct atm_aal_stats *to)
73827 {
73828-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
73829+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
73830 __AAL_STAT_ITEMS
73831 #undef __HANDLE_ITEM
73832 }
73833diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
73834index a6d5d63..1cc6c2b 100644
73835--- a/net/batman-adv/bat_iv_ogm.c
73836+++ b/net/batman-adv/bat_iv_ogm.c
73837@@ -539,7 +539,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
73838
73839 /* change sequence number to network order */
73840 batman_ogm_packet->seqno =
73841- htonl((uint32_t)atomic_read(&hard_iface->seqno));
73842+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
73843
73844 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
73845 batman_ogm_packet->tt_crc = htons((uint16_t)
73846@@ -559,7 +559,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
73847 else
73848 batman_ogm_packet->gw_flags = NO_FLAGS;
73849
73850- atomic_inc(&hard_iface->seqno);
73851+ atomic_inc_unchecked(&hard_iface->seqno);
73852
73853 slide_own_bcast_window(hard_iface);
73854 bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
73855@@ -917,7 +917,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
73856 return;
73857
73858 /* could be changed by schedule_own_packet() */
73859- if_incoming_seqno = atomic_read(&if_incoming->seqno);
73860+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
73861
73862 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
73863
73864diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
73865index 3778977..f6a9450 100644
73866--- a/net/batman-adv/hard-interface.c
73867+++ b/net/batman-adv/hard-interface.c
73868@@ -328,8 +328,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
73869 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
73870 dev_add_pack(&hard_iface->batman_adv_ptype);
73871
73872- atomic_set(&hard_iface->seqno, 1);
73873- atomic_set(&hard_iface->frag_seqno, 1);
73874+ atomic_set_unchecked(&hard_iface->seqno, 1);
73875+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
73876 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
73877 hard_iface->net_dev->name);
73878
73879diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
73880index a5590f4..8d31969 100644
73881--- a/net/batman-adv/soft-interface.c
73882+++ b/net/batman-adv/soft-interface.c
73883@@ -645,7 +645,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
73884
73885 /* set broadcast sequence number */
73886 bcast_packet->seqno =
73887- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
73888+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
73889
73890 add_bcast_packet_to_list(bat_priv, skb, 1);
73891
73892@@ -841,7 +841,7 @@ struct net_device *softif_create(const char *name)
73893 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
73894
73895 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
73896- atomic_set(&bat_priv->bcast_seqno, 1);
73897+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
73898 atomic_set(&bat_priv->ttvn, 0);
73899 atomic_set(&bat_priv->tt_local_changes, 0);
73900 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
73901diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
73902index 302efb5..1590365 100644
73903--- a/net/batman-adv/types.h
73904+++ b/net/batman-adv/types.h
73905@@ -38,8 +38,8 @@ struct hard_iface {
73906 int16_t if_num;
73907 char if_status;
73908 struct net_device *net_dev;
73909- atomic_t seqno;
73910- atomic_t frag_seqno;
73911+ atomic_unchecked_t seqno;
73912+ atomic_unchecked_t frag_seqno;
73913 unsigned char *packet_buff;
73914 int packet_len;
73915 struct kobject *hardif_obj;
73916@@ -155,7 +155,7 @@ struct bat_priv {
73917 atomic_t orig_interval; /* uint */
73918 atomic_t hop_penalty; /* uint */
73919 atomic_t log_level; /* uint */
73920- atomic_t bcast_seqno;
73921+ atomic_unchecked_t bcast_seqno;
73922 atomic_t bcast_queue_left;
73923 atomic_t batman_queue_left;
73924 atomic_t ttvn; /* translation table version number */
73925diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
73926index 676f6a6..3b4e668 100644
73927--- a/net/batman-adv/unicast.c
73928+++ b/net/batman-adv/unicast.c
73929@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
73930 frag1->flags = UNI_FRAG_HEAD | large_tail;
73931 frag2->flags = large_tail;
73932
73933- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
73934+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
73935 frag1->seqno = htons(seqno - 1);
73936 frag2->seqno = htons(seqno);
73937
73938diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
73939index 5238b6b..c9798ce 100644
73940--- a/net/bluetooth/hci_conn.c
73941+++ b/net/bluetooth/hci_conn.c
73942@@ -233,7 +233,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
73943 memset(&cp, 0, sizeof(cp));
73944
73945 cp.handle = cpu_to_le16(conn->handle);
73946- memcpy(cp.ltk, ltk, sizeof(ltk));
73947+ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
73948
73949 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
73950 }
73951diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
73952index 6f9c25b..d19fd66 100644
73953--- a/net/bluetooth/l2cap_core.c
73954+++ b/net/bluetooth/l2cap_core.c
73955@@ -2466,8 +2466,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
73956 break;
73957
73958 case L2CAP_CONF_RFC:
73959- if (olen == sizeof(rfc))
73960- memcpy(&rfc, (void *)val, olen);
73961+ if (olen != sizeof(rfc))
73962+ break;
73963+
73964+ memcpy(&rfc, (void *)val, olen);
73965
73966 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
73967 rfc.mode != chan->mode)
73968@@ -2585,8 +2587,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
73969
73970 switch (type) {
73971 case L2CAP_CONF_RFC:
73972- if (olen == sizeof(rfc))
73973- memcpy(&rfc, (void *)val, olen);
73974+ if (olen != sizeof(rfc))
73975+ break;
73976+
73977+ memcpy(&rfc, (void *)val, olen);
73978 goto done;
73979 }
73980 }
73981diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
73982index 5fe2ff3..10968b5 100644
73983--- a/net/bridge/netfilter/ebtables.c
73984+++ b/net/bridge/netfilter/ebtables.c
73985@@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
73986 tmp.valid_hooks = t->table->valid_hooks;
73987 }
73988 mutex_unlock(&ebt_mutex);
73989- if (copy_to_user(user, &tmp, *len) != 0){
73990+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
73991 BUGPRINT("c2u Didn't work\n");
73992 ret = -EFAULT;
73993 break;
73994diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
73995index 5cf5222..6f704ad 100644
73996--- a/net/caif/cfctrl.c
73997+++ b/net/caif/cfctrl.c
73998@@ -9,6 +9,7 @@
73999 #include <linux/stddef.h>
74000 #include <linux/spinlock.h>
74001 #include <linux/slab.h>
74002+#include <linux/sched.h>
74003 #include <net/caif/caif_layer.h>
74004 #include <net/caif/cfpkt.h>
74005 #include <net/caif/cfctrl.h>
74006@@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
74007 memset(&dev_info, 0, sizeof(dev_info));
74008 dev_info.id = 0xff;
74009 cfsrvl_init(&this->serv, 0, &dev_info, false);
74010- atomic_set(&this->req_seq_no, 1);
74011- atomic_set(&this->rsp_seq_no, 1);
74012+ atomic_set_unchecked(&this->req_seq_no, 1);
74013+ atomic_set_unchecked(&this->rsp_seq_no, 1);
74014 this->serv.layer.receive = cfctrl_recv;
74015 sprintf(this->serv.layer.name, "ctrl");
74016 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
74017@@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
74018 struct cfctrl_request_info *req)
74019 {
74020 spin_lock_bh(&ctrl->info_list_lock);
74021- atomic_inc(&ctrl->req_seq_no);
74022- req->sequence_no = atomic_read(&ctrl->req_seq_no);
74023+ atomic_inc_unchecked(&ctrl->req_seq_no);
74024+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
74025 list_add_tail(&req->list, &ctrl->list);
74026 spin_unlock_bh(&ctrl->info_list_lock);
74027 }
74028@@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
74029 if (p != first)
74030 pr_warn("Requests are not received in order\n");
74031
74032- atomic_set(&ctrl->rsp_seq_no,
74033+ atomic_set_unchecked(&ctrl->rsp_seq_no,
74034 p->sequence_no);
74035 list_del(&p->list);
74036 goto out;
74037diff --git a/net/can/gw.c b/net/can/gw.c
74038index 3d79b12..8de85fa 100644
74039--- a/net/can/gw.c
74040+++ b/net/can/gw.c
74041@@ -96,7 +96,7 @@ struct cf_mod {
74042 struct {
74043 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
74044 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
74045- } csumfunc;
74046+ } __no_const csumfunc;
74047 };
74048
74049
74050diff --git a/net/compat.c b/net/compat.c
74051index e055708..3f80795 100644
74052--- a/net/compat.c
74053+++ b/net/compat.c
74054@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
74055 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
74056 __get_user(kmsg->msg_flags, &umsg->msg_flags))
74057 return -EFAULT;
74058- kmsg->msg_name = compat_ptr(tmp1);
74059- kmsg->msg_iov = compat_ptr(tmp2);
74060- kmsg->msg_control = compat_ptr(tmp3);
74061+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
74062+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
74063+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
74064 return 0;
74065 }
74066
74067@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74068
74069 if (kern_msg->msg_namelen) {
74070 if (mode == VERIFY_READ) {
74071- int err = move_addr_to_kernel(kern_msg->msg_name,
74072+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
74073 kern_msg->msg_namelen,
74074 kern_address);
74075 if (err < 0)
74076@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74077 kern_msg->msg_name = NULL;
74078
74079 tot_len = iov_from_user_compat_to_kern(kern_iov,
74080- (struct compat_iovec __user *)kern_msg->msg_iov,
74081+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
74082 kern_msg->msg_iovlen);
74083 if (tot_len >= 0)
74084 kern_msg->msg_iov = kern_iov;
74085@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74086
74087 #define CMSG_COMPAT_FIRSTHDR(msg) \
74088 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
74089- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
74090+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
74091 (struct compat_cmsghdr __user *)NULL)
74092
74093 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
74094 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
74095 (ucmlen) <= (unsigned long) \
74096 ((mhdr)->msg_controllen - \
74097- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
74098+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
74099
74100 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
74101 struct compat_cmsghdr __user *cmsg, int cmsg_len)
74102 {
74103 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
74104- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
74105+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
74106 msg->msg_controllen)
74107 return NULL;
74108 return (struct compat_cmsghdr __user *)ptr;
74109@@ -219,7 +219,7 @@ Efault:
74110
74111 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
74112 {
74113- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74114+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74115 struct compat_cmsghdr cmhdr;
74116 int cmlen;
74117
74118@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
74119
74120 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
74121 {
74122- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74123+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74124 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
74125 int fdnum = scm->fp->count;
74126 struct file **fp = scm->fp->fp;
74127@@ -372,7 +372,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
74128 return -EFAULT;
74129 old_fs = get_fs();
74130 set_fs(KERNEL_DS);
74131- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
74132+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
74133 set_fs(old_fs);
74134
74135 return err;
74136@@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
74137 len = sizeof(ktime);
74138 old_fs = get_fs();
74139 set_fs(KERNEL_DS);
74140- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
74141+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
74142 set_fs(old_fs);
74143
74144 if (!err) {
74145@@ -576,7 +576,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74146 case MCAST_JOIN_GROUP:
74147 case MCAST_LEAVE_GROUP:
74148 {
74149- struct compat_group_req __user *gr32 = (void *)optval;
74150+ struct compat_group_req __user *gr32 = (void __user *)optval;
74151 struct group_req __user *kgr =
74152 compat_alloc_user_space(sizeof(struct group_req));
74153 u32 interface;
74154@@ -597,7 +597,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74155 case MCAST_BLOCK_SOURCE:
74156 case MCAST_UNBLOCK_SOURCE:
74157 {
74158- struct compat_group_source_req __user *gsr32 = (void *)optval;
74159+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
74160 struct group_source_req __user *kgsr = compat_alloc_user_space(
74161 sizeof(struct group_source_req));
74162 u32 interface;
74163@@ -618,7 +618,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74164 }
74165 case MCAST_MSFILTER:
74166 {
74167- struct compat_group_filter __user *gf32 = (void *)optval;
74168+ struct compat_group_filter __user *gf32 = (void __user *)optval;
74169 struct group_filter __user *kgf;
74170 u32 interface, fmode, numsrc;
74171
74172@@ -656,7 +656,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
74173 char __user *optval, int __user *optlen,
74174 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
74175 {
74176- struct compat_group_filter __user *gf32 = (void *)optval;
74177+ struct compat_group_filter __user *gf32 = (void __user *)optval;
74178 struct group_filter __user *kgf;
74179 int __user *koptlen;
74180 u32 interface, fmode, numsrc;
74181diff --git a/net/core/datagram.c b/net/core/datagram.c
74182index e4fbfd6..6a6ac94 100644
74183--- a/net/core/datagram.c
74184+++ b/net/core/datagram.c
74185@@ -290,7 +290,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
74186 }
74187
74188 kfree_skb(skb);
74189- atomic_inc(&sk->sk_drops);
74190+ atomic_inc_unchecked(&sk->sk_drops);
74191 sk_mem_reclaim_partial(sk);
74192
74193 return err;
74194diff --git a/net/core/dev.c b/net/core/dev.c
74195index 99e1d75..adf968a 100644
74196--- a/net/core/dev.c
74197+++ b/net/core/dev.c
74198@@ -1136,9 +1136,13 @@ void dev_load(struct net *net, const char *name)
74199 if (no_module && capable(CAP_NET_ADMIN))
74200 no_module = request_module("netdev-%s", name);
74201 if (no_module && capable(CAP_SYS_MODULE)) {
74202+#ifdef CONFIG_GRKERNSEC_MODHARDEN
74203+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
74204+#else
74205 if (!request_module("%s", name))
74206 pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
74207 name);
74208+#endif
74209 }
74210 }
74211 EXPORT_SYMBOL(dev_load);
74212@@ -1602,7 +1606,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74213 {
74214 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
74215 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
74216- atomic_long_inc(&dev->rx_dropped);
74217+ atomic_long_inc_unchecked(&dev->rx_dropped);
74218 kfree_skb(skb);
74219 return NET_RX_DROP;
74220 }
74221@@ -1612,7 +1616,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74222 nf_reset(skb);
74223
74224 if (unlikely(!is_skb_forwardable(dev, skb))) {
74225- atomic_long_inc(&dev->rx_dropped);
74226+ atomic_long_inc_unchecked(&dev->rx_dropped);
74227 kfree_skb(skb);
74228 return NET_RX_DROP;
74229 }
74230@@ -2042,7 +2046,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
74231
74232 struct dev_gso_cb {
74233 void (*destructor)(struct sk_buff *skb);
74234-};
74235+} __no_const;
74236
74237 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
74238
74239@@ -2898,7 +2902,7 @@ enqueue:
74240
74241 local_irq_restore(flags);
74242
74243- atomic_long_inc(&skb->dev->rx_dropped);
74244+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
74245 kfree_skb(skb);
74246 return NET_RX_DROP;
74247 }
74248@@ -2970,7 +2974,7 @@ int netif_rx_ni(struct sk_buff *skb)
74249 }
74250 EXPORT_SYMBOL(netif_rx_ni);
74251
74252-static void net_tx_action(struct softirq_action *h)
74253+static void net_tx_action(void)
74254 {
74255 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74256
74257@@ -3258,7 +3262,7 @@ ncls:
74258 if (pt_prev) {
74259 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
74260 } else {
74261- atomic_long_inc(&skb->dev->rx_dropped);
74262+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
74263 kfree_skb(skb);
74264 /* Jamal, now you will not able to escape explaining
74265 * me how you were going to use this. :-)
74266@@ -3818,7 +3822,7 @@ void netif_napi_del(struct napi_struct *napi)
74267 }
74268 EXPORT_SYMBOL(netif_napi_del);
74269
74270-static void net_rx_action(struct softirq_action *h)
74271+static void net_rx_action(void)
74272 {
74273 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74274 unsigned long time_limit = jiffies + 2;
74275@@ -4288,8 +4292,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
74276 else
74277 seq_printf(seq, "%04x", ntohs(pt->type));
74278
74279+#ifdef CONFIG_GRKERNSEC_HIDESYM
74280+ seq_printf(seq, " %-8s %p\n",
74281+ pt->dev ? pt->dev->name : "", NULL);
74282+#else
74283 seq_printf(seq, " %-8s %pF\n",
74284 pt->dev ? pt->dev->name : "", pt->func);
74285+#endif
74286 }
74287
74288 return 0;
74289@@ -5839,7 +5848,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
74290 } else {
74291 netdev_stats_to_stats64(storage, &dev->stats);
74292 }
74293- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
74294+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
74295 return storage;
74296 }
74297 EXPORT_SYMBOL(dev_get_stats);
74298diff --git a/net/core/flow.c b/net/core/flow.c
74299index e318c7e..168b1d0 100644
74300--- a/net/core/flow.c
74301+++ b/net/core/flow.c
74302@@ -61,7 +61,7 @@ struct flow_cache {
74303 struct timer_list rnd_timer;
74304 };
74305
74306-atomic_t flow_cache_genid = ATOMIC_INIT(0);
74307+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
74308 EXPORT_SYMBOL(flow_cache_genid);
74309 static struct flow_cache flow_cache_global;
74310 static struct kmem_cache *flow_cachep __read_mostly;
74311@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
74312
74313 static int flow_entry_valid(struct flow_cache_entry *fle)
74314 {
74315- if (atomic_read(&flow_cache_genid) != fle->genid)
74316+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
74317 return 0;
74318 if (fle->object && !fle->object->ops->check(fle->object))
74319 return 0;
74320@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
74321 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
74322 fcp->hash_count++;
74323 }
74324- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
74325+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
74326 flo = fle->object;
74327 if (!flo)
74328 goto ret_object;
74329@@ -280,7 +280,7 @@ nocache:
74330 }
74331 flo = resolver(net, key, family, dir, flo, ctx);
74332 if (fle) {
74333- fle->genid = atomic_read(&flow_cache_genid);
74334+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
74335 if (!IS_ERR(flo))
74336 fle->object = flo;
74337 else
74338diff --git a/net/core/iovec.c b/net/core/iovec.c
74339index 7e7aeb0..2a998cb 100644
74340--- a/net/core/iovec.c
74341+++ b/net/core/iovec.c
74342@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
74343 if (m->msg_namelen) {
74344 if (mode == VERIFY_READ) {
74345 void __user *namep;
74346- namep = (void __user __force *) m->msg_name;
74347+ namep = (void __force_user *) m->msg_name;
74348 err = move_addr_to_kernel(namep, m->msg_namelen,
74349 address);
74350 if (err < 0)
74351@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
74352 }
74353
74354 size = m->msg_iovlen * sizeof(struct iovec);
74355- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
74356+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
74357 return -EFAULT;
74358
74359 m->msg_iov = iov;
74360diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
74361index 90430b7..0032ec0 100644
74362--- a/net/core/rtnetlink.c
74363+++ b/net/core/rtnetlink.c
74364@@ -56,7 +56,7 @@ struct rtnl_link {
74365 rtnl_doit_func doit;
74366 rtnl_dumpit_func dumpit;
74367 rtnl_calcit_func calcit;
74368-};
74369+} __no_const;
74370
74371 static DEFINE_MUTEX(rtnl_mutex);
74372
74373diff --git a/net/core/scm.c b/net/core/scm.c
74374index 611c5ef..88f6d6d 100644
74375--- a/net/core/scm.c
74376+++ b/net/core/scm.c
74377@@ -219,7 +219,7 @@ EXPORT_SYMBOL(__scm_send);
74378 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
74379 {
74380 struct cmsghdr __user *cm
74381- = (__force struct cmsghdr __user *)msg->msg_control;
74382+ = (struct cmsghdr __force_user *)msg->msg_control;
74383 struct cmsghdr cmhdr;
74384 int cmlen = CMSG_LEN(len);
74385 int err;
74386@@ -242,7 +242,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
74387 err = -EFAULT;
74388 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
74389 goto out;
74390- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
74391+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
74392 goto out;
74393 cmlen = CMSG_SPACE(len);
74394 if (msg->msg_controllen < cmlen)
74395@@ -258,7 +258,7 @@ EXPORT_SYMBOL(put_cmsg);
74396 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
74397 {
74398 struct cmsghdr __user *cm
74399- = (__force struct cmsghdr __user*)msg->msg_control;
74400+ = (struct cmsghdr __force_user *)msg->msg_control;
74401
74402 int fdmax = 0;
74403 int fdnum = scm->fp->count;
74404@@ -278,7 +278,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
74405 if (fdnum < fdmax)
74406 fdmax = fdnum;
74407
74408- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
74409+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
74410 i++, cmfptr++)
74411 {
74412 int new_fd;
74413diff --git a/net/core/sock.c b/net/core/sock.c
74414index b2e14c0..6651b32 100644
74415--- a/net/core/sock.c
74416+++ b/net/core/sock.c
74417@@ -340,7 +340,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74418 struct sk_buff_head *list = &sk->sk_receive_queue;
74419
74420 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
74421- atomic_inc(&sk->sk_drops);
74422+ atomic_inc_unchecked(&sk->sk_drops);
74423 trace_sock_rcvqueue_full(sk, skb);
74424 return -ENOMEM;
74425 }
74426@@ -350,7 +350,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74427 return err;
74428
74429 if (!sk_rmem_schedule(sk, skb->truesize)) {
74430- atomic_inc(&sk->sk_drops);
74431+ atomic_inc_unchecked(&sk->sk_drops);
74432 return -ENOBUFS;
74433 }
74434
74435@@ -370,7 +370,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74436 skb_dst_force(skb);
74437
74438 spin_lock_irqsave(&list->lock, flags);
74439- skb->dropcount = atomic_read(&sk->sk_drops);
74440+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
74441 __skb_queue_tail(list, skb);
74442 spin_unlock_irqrestore(&list->lock, flags);
74443
74444@@ -390,7 +390,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
74445 skb->dev = NULL;
74446
74447 if (sk_rcvqueues_full(sk, skb)) {
74448- atomic_inc(&sk->sk_drops);
74449+ atomic_inc_unchecked(&sk->sk_drops);
74450 goto discard_and_relse;
74451 }
74452 if (nested)
74453@@ -408,7 +408,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
74454 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
74455 } else if (sk_add_backlog(sk, skb)) {
74456 bh_unlock_sock(sk);
74457- atomic_inc(&sk->sk_drops);
74458+ atomic_inc_unchecked(&sk->sk_drops);
74459 goto discard_and_relse;
74460 }
74461
74462@@ -984,7 +984,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74463 if (len > sizeof(peercred))
74464 len = sizeof(peercred);
74465 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
74466- if (copy_to_user(optval, &peercred, len))
74467+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
74468 return -EFAULT;
74469 goto lenout;
74470 }
74471@@ -997,7 +997,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74472 return -ENOTCONN;
74473 if (lv < len)
74474 return -EINVAL;
74475- if (copy_to_user(optval, address, len))
74476+ if (len > sizeof(address) || copy_to_user(optval, address, len))
74477 return -EFAULT;
74478 goto lenout;
74479 }
74480@@ -1043,7 +1043,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74481
74482 if (len > lv)
74483 len = lv;
74484- if (copy_to_user(optval, &v, len))
74485+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
74486 return -EFAULT;
74487 lenout:
74488 if (put_user(len, optlen))
74489@@ -2128,7 +2128,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
74490 */
74491 smp_wmb();
74492 atomic_set(&sk->sk_refcnt, 1);
74493- atomic_set(&sk->sk_drops, 0);
74494+ atomic_set_unchecked(&sk->sk_drops, 0);
74495 }
74496 EXPORT_SYMBOL(sock_init_data);
74497
74498diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
74499index b9868e1..849f809 100644
74500--- a/net/core/sock_diag.c
74501+++ b/net/core/sock_diag.c
74502@@ -16,20 +16,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
74503
74504 int sock_diag_check_cookie(void *sk, __u32 *cookie)
74505 {
74506+#ifndef CONFIG_GRKERNSEC_HIDESYM
74507 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
74508 cookie[1] != INET_DIAG_NOCOOKIE) &&
74509 ((u32)(unsigned long)sk != cookie[0] ||
74510 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
74511 return -ESTALE;
74512 else
74513+#endif
74514 return 0;
74515 }
74516 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
74517
74518 void sock_diag_save_cookie(void *sk, __u32 *cookie)
74519 {
74520+#ifdef CONFIG_GRKERNSEC_HIDESYM
74521+ cookie[0] = 0;
74522+ cookie[1] = 0;
74523+#else
74524 cookie[0] = (u32)(unsigned long)sk;
74525 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
74526+#endif
74527 }
74528 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
74529
74530diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
74531index 02e75d1..9a57a7c 100644
74532--- a/net/decnet/sysctl_net_decnet.c
74533+++ b/net/decnet/sysctl_net_decnet.c
74534@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
74535
74536 if (len > *lenp) len = *lenp;
74537
74538- if (copy_to_user(buffer, addr, len))
74539+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
74540 return -EFAULT;
74541
74542 *lenp = len;
74543@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
74544
74545 if (len > *lenp) len = *lenp;
74546
74547- if (copy_to_user(buffer, devname, len))
74548+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
74549 return -EFAULT;
74550
74551 *lenp = len;
74552diff --git a/net/econet/Kconfig b/net/econet/Kconfig
74553index 39a2d29..f39c0fe 100644
74554--- a/net/econet/Kconfig
74555+++ b/net/econet/Kconfig
74556@@ -4,7 +4,7 @@
74557
74558 config ECONET
74559 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
74560- depends on EXPERIMENTAL && INET
74561+ depends on EXPERIMENTAL && INET && BROKEN
74562 ---help---
74563 Econet is a fairly old and slow networking protocol mainly used by
74564 Acorn computers to access file and print servers. It uses native
74565diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
74566index cbe3a68..a879b75 100644
74567--- a/net/ipv4/fib_frontend.c
74568+++ b/net/ipv4/fib_frontend.c
74569@@ -969,12 +969,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
74570 #ifdef CONFIG_IP_ROUTE_MULTIPATH
74571 fib_sync_up(dev);
74572 #endif
74573- atomic_inc(&net->ipv4.dev_addr_genid);
74574+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74575 rt_cache_flush(dev_net(dev), -1);
74576 break;
74577 case NETDEV_DOWN:
74578 fib_del_ifaddr(ifa, NULL);
74579- atomic_inc(&net->ipv4.dev_addr_genid);
74580+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74581 if (ifa->ifa_dev->ifa_list == NULL) {
74582 /* Last address was deleted from this interface.
74583 * Disable IP.
74584@@ -1010,7 +1010,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
74585 #ifdef CONFIG_IP_ROUTE_MULTIPATH
74586 fib_sync_up(dev);
74587 #endif
74588- atomic_inc(&net->ipv4.dev_addr_genid);
74589+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74590 rt_cache_flush(dev_net(dev), -1);
74591 break;
74592 case NETDEV_DOWN:
74593diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
74594index 8861f91..ab1e3c1 100644
74595--- a/net/ipv4/fib_semantics.c
74596+++ b/net/ipv4/fib_semantics.c
74597@@ -698,7 +698,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
74598 nh->nh_saddr = inet_select_addr(nh->nh_dev,
74599 nh->nh_gw,
74600 nh->nh_parent->fib_scope);
74601- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
74602+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
74603
74604 return nh->nh_saddr;
74605 }
74606diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
74607index 984ec65..97ac518 100644
74608--- a/net/ipv4/inet_hashtables.c
74609+++ b/net/ipv4/inet_hashtables.c
74610@@ -18,12 +18,15 @@
74611 #include <linux/sched.h>
74612 #include <linux/slab.h>
74613 #include <linux/wait.h>
74614+#include <linux/security.h>
74615
74616 #include <net/inet_connection_sock.h>
74617 #include <net/inet_hashtables.h>
74618 #include <net/secure_seq.h>
74619 #include <net/ip.h>
74620
74621+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
74622+
74623 /*
74624 * Allocate and initialize a new local port bind bucket.
74625 * The bindhash mutex for snum's hash chain must be held here.
74626@@ -530,6 +533,8 @@ ok:
74627 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
74628 spin_unlock(&head->lock);
74629
74630+ gr_update_task_in_ip_table(current, inet_sk(sk));
74631+
74632 if (tw) {
74633 inet_twsk_deschedule(tw, death_row);
74634 while (twrefcnt) {
74635diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
74636index d4d61b6..b81aec8 100644
74637--- a/net/ipv4/inetpeer.c
74638+++ b/net/ipv4/inetpeer.c
74639@@ -487,8 +487,8 @@ relookup:
74640 if (p) {
74641 p->daddr = *daddr;
74642 atomic_set(&p->refcnt, 1);
74643- atomic_set(&p->rid, 0);
74644- atomic_set(&p->ip_id_count,
74645+ atomic_set_unchecked(&p->rid, 0);
74646+ atomic_set_unchecked(&p->ip_id_count,
74647 (daddr->family == AF_INET) ?
74648 secure_ip_id(daddr->addr.a4) :
74649 secure_ipv6_id(daddr->addr.a6));
74650diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
74651index 3727e23..517f5df 100644
74652--- a/net/ipv4/ip_fragment.c
74653+++ b/net/ipv4/ip_fragment.c
74654@@ -318,7 +318,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
74655 return 0;
74656
74657 start = qp->rid;
74658- end = atomic_inc_return(&peer->rid);
74659+ end = atomic_inc_return_unchecked(&peer->rid);
74660 qp->rid = end;
74661
74662 rc = qp->q.fragments && (end - start) > max;
74663diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
74664index 2fd0fba..83fac99 100644
74665--- a/net/ipv4/ip_sockglue.c
74666+++ b/net/ipv4/ip_sockglue.c
74667@@ -1137,7 +1137,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
74668 len = min_t(unsigned int, len, opt->optlen);
74669 if (put_user(len, optlen))
74670 return -EFAULT;
74671- if (copy_to_user(optval, opt->__data, len))
74672+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
74673+ copy_to_user(optval, opt->__data, len))
74674 return -EFAULT;
74675 return 0;
74676 }
74677@@ -1268,7 +1269,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
74678 if (sk->sk_type != SOCK_STREAM)
74679 return -ENOPROTOOPT;
74680
74681- msg.msg_control = optval;
74682+ msg.msg_control = (void __force_kernel *)optval;
74683 msg.msg_controllen = len;
74684 msg.msg_flags = flags;
74685
74686diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
74687index 92ac7e7..13f93d9 100644
74688--- a/net/ipv4/ipconfig.c
74689+++ b/net/ipv4/ipconfig.c
74690@@ -321,7 +321,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
74691
74692 mm_segment_t oldfs = get_fs();
74693 set_fs(get_ds());
74694- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
74695+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
74696 set_fs(oldfs);
74697 return res;
74698 }
74699@@ -332,7 +332,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
74700
74701 mm_segment_t oldfs = get_fs();
74702 set_fs(get_ds());
74703- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
74704+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
74705 set_fs(oldfs);
74706 return res;
74707 }
74708@@ -343,7 +343,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
74709
74710 mm_segment_t oldfs = get_fs();
74711 set_fs(get_ds());
74712- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
74713+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
74714 set_fs(oldfs);
74715 return res;
74716 }
74717diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
74718index 50009c7..5996a9f 100644
74719--- a/net/ipv4/ping.c
74720+++ b/net/ipv4/ping.c
74721@@ -838,7 +838,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
74722 sk_rmem_alloc_get(sp),
74723 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74724 atomic_read(&sp->sk_refcnt), sp,
74725- atomic_read(&sp->sk_drops), len);
74726+ atomic_read_unchecked(&sp->sk_drops), len);
74727 }
74728
74729 static int ping_seq_show(struct seq_file *seq, void *v)
74730diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
74731index bbd604c..4d5469c 100644
74732--- a/net/ipv4/raw.c
74733+++ b/net/ipv4/raw.c
74734@@ -304,7 +304,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
74735 int raw_rcv(struct sock *sk, struct sk_buff *skb)
74736 {
74737 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
74738- atomic_inc(&sk->sk_drops);
74739+ atomic_inc_unchecked(&sk->sk_drops);
74740 kfree_skb(skb);
74741 return NET_RX_DROP;
74742 }
74743@@ -740,16 +740,20 @@ static int raw_init(struct sock *sk)
74744
74745 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
74746 {
74747+ struct icmp_filter filter;
74748+
74749 if (optlen > sizeof(struct icmp_filter))
74750 optlen = sizeof(struct icmp_filter);
74751- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
74752+ if (copy_from_user(&filter, optval, optlen))
74753 return -EFAULT;
74754+ raw_sk(sk)->filter = filter;
74755 return 0;
74756 }
74757
74758 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
74759 {
74760 int len, ret = -EFAULT;
74761+ struct icmp_filter filter;
74762
74763 if (get_user(len, optlen))
74764 goto out;
74765@@ -759,8 +763,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
74766 if (len > sizeof(struct icmp_filter))
74767 len = sizeof(struct icmp_filter);
74768 ret = -EFAULT;
74769- if (put_user(len, optlen) ||
74770- copy_to_user(optval, &raw_sk(sk)->filter, len))
74771+ filter = raw_sk(sk)->filter;
74772+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
74773 goto out;
74774 ret = 0;
74775 out: return ret;
74776@@ -988,7 +992,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
74777 sk_wmem_alloc_get(sp),
74778 sk_rmem_alloc_get(sp),
74779 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74780- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
74781+ atomic_read(&sp->sk_refcnt),
74782+#ifdef CONFIG_GRKERNSEC_HIDESYM
74783+ NULL,
74784+#else
74785+ sp,
74786+#endif
74787+ atomic_read_unchecked(&sp->sk_drops));
74788 }
74789
74790 static int raw_seq_show(struct seq_file *seq, void *v)
74791diff --git a/net/ipv4/route.c b/net/ipv4/route.c
74792index 167ea10..4b15883 100644
74793--- a/net/ipv4/route.c
74794+++ b/net/ipv4/route.c
74795@@ -312,7 +312,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
74796
74797 static inline int rt_genid(struct net *net)
74798 {
74799- return atomic_read(&net->ipv4.rt_genid);
74800+ return atomic_read_unchecked(&net->ipv4.rt_genid);
74801 }
74802
74803 #ifdef CONFIG_PROC_FS
74804@@ -936,7 +936,7 @@ static void rt_cache_invalidate(struct net *net)
74805 unsigned char shuffle;
74806
74807 get_random_bytes(&shuffle, sizeof(shuffle));
74808- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
74809+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
74810 inetpeer_invalidate_tree(AF_INET);
74811 }
74812
74813@@ -3009,7 +3009,7 @@ static int rt_fill_info(struct net *net,
74814 error = rt->dst.error;
74815 if (peer) {
74816 inet_peer_refcheck(rt->peer);
74817- id = atomic_read(&peer->ip_id_count) & 0xffff;
74818+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
74819 if (peer->tcp_ts_stamp) {
74820 ts = peer->tcp_ts;
74821 tsage = get_seconds() - peer->tcp_ts_stamp;
74822diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
74823index 0cb86ce..8e7fda8 100644
74824--- a/net/ipv4/tcp_ipv4.c
74825+++ b/net/ipv4/tcp_ipv4.c
74826@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
74827 EXPORT_SYMBOL(sysctl_tcp_low_latency);
74828
74829
74830+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74831+extern int grsec_enable_blackhole;
74832+#endif
74833+
74834 #ifdef CONFIG_TCP_MD5SIG
74835 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
74836 __be32 daddr, __be32 saddr, const struct tcphdr *th);
74837@@ -1641,6 +1645,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
74838 return 0;
74839
74840 reset:
74841+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74842+ if (!grsec_enable_blackhole)
74843+#endif
74844 tcp_v4_send_reset(rsk, skb);
74845 discard:
74846 kfree_skb(skb);
74847@@ -1703,12 +1710,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
74848 TCP_SKB_CB(skb)->sacked = 0;
74849
74850 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
74851- if (!sk)
74852+ if (!sk) {
74853+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74854+ ret = 1;
74855+#endif
74856 goto no_tcp_socket;
74857-
74858+ }
74859 process:
74860- if (sk->sk_state == TCP_TIME_WAIT)
74861+ if (sk->sk_state == TCP_TIME_WAIT) {
74862+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74863+ ret = 2;
74864+#endif
74865 goto do_time_wait;
74866+ }
74867
74868 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
74869 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
74870@@ -1758,6 +1772,10 @@ no_tcp_socket:
74871 bad_packet:
74872 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
74873 } else {
74874+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74875+ if (!grsec_enable_blackhole || (ret == 1 &&
74876+ (skb->dev->flags & IFF_LOOPBACK)))
74877+#endif
74878 tcp_v4_send_reset(NULL, skb);
74879 }
74880
74881@@ -2419,7 +2437,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
74882 0, /* non standard timer */
74883 0, /* open_requests have no inode */
74884 atomic_read(&sk->sk_refcnt),
74885+#ifdef CONFIG_GRKERNSEC_HIDESYM
74886+ NULL,
74887+#else
74888 req,
74889+#endif
74890 len);
74891 }
74892
74893@@ -2469,7 +2491,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
74894 sock_i_uid(sk),
74895 icsk->icsk_probes_out,
74896 sock_i_ino(sk),
74897- atomic_read(&sk->sk_refcnt), sk,
74898+ atomic_read(&sk->sk_refcnt),
74899+#ifdef CONFIG_GRKERNSEC_HIDESYM
74900+ NULL,
74901+#else
74902+ sk,
74903+#endif
74904 jiffies_to_clock_t(icsk->icsk_rto),
74905 jiffies_to_clock_t(icsk->icsk_ack.ato),
74906 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
74907@@ -2497,7 +2524,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
74908 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
74909 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
74910 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
74911- atomic_read(&tw->tw_refcnt), tw, len);
74912+ atomic_read(&tw->tw_refcnt),
74913+#ifdef CONFIG_GRKERNSEC_HIDESYM
74914+ NULL,
74915+#else
74916+ tw,
74917+#endif
74918+ len);
74919 }
74920
74921 #define TMPSZ 150
74922diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
74923index 3cabafb..640525b 100644
74924--- a/net/ipv4/tcp_minisocks.c
74925+++ b/net/ipv4/tcp_minisocks.c
74926@@ -27,6 +27,10 @@
74927 #include <net/inet_common.h>
74928 #include <net/xfrm.h>
74929
74930+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74931+extern int grsec_enable_blackhole;
74932+#endif
74933+
74934 int sysctl_tcp_syncookies __read_mostly = 1;
74935 EXPORT_SYMBOL(sysctl_tcp_syncookies);
74936
74937@@ -753,6 +757,10 @@ listen_overflow:
74938
74939 embryonic_reset:
74940 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
74941+
74942+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74943+ if (!grsec_enable_blackhole)
74944+#endif
74945 if (!(flg & TCP_FLAG_RST))
74946 req->rsk_ops->send_reset(sk, skb);
74947
74948diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
74949index a981cdc..48f4c3a 100644
74950--- a/net/ipv4/tcp_probe.c
74951+++ b/net/ipv4/tcp_probe.c
74952@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
74953 if (cnt + width >= len)
74954 break;
74955
74956- if (copy_to_user(buf + cnt, tbuf, width))
74957+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
74958 return -EFAULT;
74959 cnt += width;
74960 }
74961diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
74962index 34d4a02..3b57f86 100644
74963--- a/net/ipv4/tcp_timer.c
74964+++ b/net/ipv4/tcp_timer.c
74965@@ -22,6 +22,10 @@
74966 #include <linux/gfp.h>
74967 #include <net/tcp.h>
74968
74969+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74970+extern int grsec_lastack_retries;
74971+#endif
74972+
74973 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
74974 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
74975 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
74976@@ -196,6 +200,13 @@ static int tcp_write_timeout(struct sock *sk)
74977 }
74978 }
74979
74980+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74981+ if ((sk->sk_state == TCP_LAST_ACK) &&
74982+ (grsec_lastack_retries > 0) &&
74983+ (grsec_lastack_retries < retry_until))
74984+ retry_until = grsec_lastack_retries;
74985+#endif
74986+
74987 if (retransmits_timed_out(sk, retry_until,
74988 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
74989 /* Has it gone just too far? */
74990diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
74991index fe14105..0618260 100644
74992--- a/net/ipv4/udp.c
74993+++ b/net/ipv4/udp.c
74994@@ -87,6 +87,7 @@
74995 #include <linux/types.h>
74996 #include <linux/fcntl.h>
74997 #include <linux/module.h>
74998+#include <linux/security.h>
74999 #include <linux/socket.h>
75000 #include <linux/sockios.h>
75001 #include <linux/igmp.h>
75002@@ -109,6 +110,10 @@
75003 #include <trace/events/udp.h>
75004 #include "udp_impl.h"
75005
75006+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75007+extern int grsec_enable_blackhole;
75008+#endif
75009+
75010 struct udp_table udp_table __read_mostly;
75011 EXPORT_SYMBOL(udp_table);
75012
75013@@ -567,6 +572,9 @@ found:
75014 return s;
75015 }
75016
75017+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
75018+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
75019+
75020 /*
75021 * This routine is called by the ICMP module when it gets some
75022 * sort of error condition. If err < 0 then the socket should
75023@@ -858,9 +866,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
75024 dport = usin->sin_port;
75025 if (dport == 0)
75026 return -EINVAL;
75027+
75028+ err = gr_search_udp_sendmsg(sk, usin);
75029+ if (err)
75030+ return err;
75031 } else {
75032 if (sk->sk_state != TCP_ESTABLISHED)
75033 return -EDESTADDRREQ;
75034+
75035+ err = gr_search_udp_sendmsg(sk, NULL);
75036+ if (err)
75037+ return err;
75038+
75039 daddr = inet->inet_daddr;
75040 dport = inet->inet_dport;
75041 /* Open fast path for connected socket.
75042@@ -1102,7 +1119,7 @@ static unsigned int first_packet_length(struct sock *sk)
75043 udp_lib_checksum_complete(skb)) {
75044 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75045 IS_UDPLITE(sk));
75046- atomic_inc(&sk->sk_drops);
75047+ atomic_inc_unchecked(&sk->sk_drops);
75048 __skb_unlink(skb, rcvq);
75049 __skb_queue_tail(&list_kill, skb);
75050 }
75051@@ -1188,6 +1205,10 @@ try_again:
75052 if (!skb)
75053 goto out;
75054
75055+ err = gr_search_udp_recvmsg(sk, skb);
75056+ if (err)
75057+ goto out_free;
75058+
75059 ulen = skb->len - sizeof(struct udphdr);
75060 copied = len;
75061 if (copied > ulen)
75062@@ -1489,7 +1510,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75063
75064 drop:
75065 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
75066- atomic_inc(&sk->sk_drops);
75067+ atomic_inc_unchecked(&sk->sk_drops);
75068 kfree_skb(skb);
75069 return -1;
75070 }
75071@@ -1508,7 +1529,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
75072 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
75073
75074 if (!skb1) {
75075- atomic_inc(&sk->sk_drops);
75076+ atomic_inc_unchecked(&sk->sk_drops);
75077 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
75078 IS_UDPLITE(sk));
75079 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75080@@ -1677,6 +1698,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75081 goto csum_error;
75082
75083 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
75084+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75085+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75086+#endif
75087 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
75088
75089 /*
75090@@ -2094,8 +2118,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
75091 sk_wmem_alloc_get(sp),
75092 sk_rmem_alloc_get(sp),
75093 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75094- atomic_read(&sp->sk_refcnt), sp,
75095- atomic_read(&sp->sk_drops), len);
75096+ atomic_read(&sp->sk_refcnt),
75097+#ifdef CONFIG_GRKERNSEC_HIDESYM
75098+ NULL,
75099+#else
75100+ sp,
75101+#endif
75102+ atomic_read_unchecked(&sp->sk_drops), len);
75103 }
75104
75105 int udp4_seq_show(struct seq_file *seq, void *v)
75106diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
75107index 7d5cb97..c56564f 100644
75108--- a/net/ipv6/addrconf.c
75109+++ b/net/ipv6/addrconf.c
75110@@ -2142,7 +2142,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
75111 p.iph.ihl = 5;
75112 p.iph.protocol = IPPROTO_IPV6;
75113 p.iph.ttl = 64;
75114- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
75115+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
75116
75117 if (ops->ndo_do_ioctl) {
75118 mm_segment_t oldfs = get_fs();
75119diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
75120index 02dd203..e03fcc9 100644
75121--- a/net/ipv6/inet6_connection_sock.c
75122+++ b/net/ipv6/inet6_connection_sock.c
75123@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
75124 #ifdef CONFIG_XFRM
75125 {
75126 struct rt6_info *rt = (struct rt6_info *)dst;
75127- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
75128+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
75129 }
75130 #endif
75131 }
75132@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
75133 #ifdef CONFIG_XFRM
75134 if (dst) {
75135 struct rt6_info *rt = (struct rt6_info *)dst;
75136- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
75137+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
75138 __sk_dst_reset(sk);
75139 dst = NULL;
75140 }
75141diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
75142index 63dd1f8..e7f53ca 100644
75143--- a/net/ipv6/ipv6_sockglue.c
75144+++ b/net/ipv6/ipv6_sockglue.c
75145@@ -990,7 +990,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
75146 if (sk->sk_type != SOCK_STREAM)
75147 return -ENOPROTOOPT;
75148
75149- msg.msg_control = optval;
75150+ msg.msg_control = (void __force_kernel *)optval;
75151 msg.msg_controllen = len;
75152 msg.msg_flags = flags;
75153
75154diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
75155index 5bddea7..82d9d67 100644
75156--- a/net/ipv6/raw.c
75157+++ b/net/ipv6/raw.c
75158@@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
75159 {
75160 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
75161 skb_checksum_complete(skb)) {
75162- atomic_inc(&sk->sk_drops);
75163+ atomic_inc_unchecked(&sk->sk_drops);
75164 kfree_skb(skb);
75165 return NET_RX_DROP;
75166 }
75167@@ -405,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75168 struct raw6_sock *rp = raw6_sk(sk);
75169
75170 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
75171- atomic_inc(&sk->sk_drops);
75172+ atomic_inc_unchecked(&sk->sk_drops);
75173 kfree_skb(skb);
75174 return NET_RX_DROP;
75175 }
75176@@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75177
75178 if (inet->hdrincl) {
75179 if (skb_checksum_complete(skb)) {
75180- atomic_inc(&sk->sk_drops);
75181+ atomic_inc_unchecked(&sk->sk_drops);
75182 kfree_skb(skb);
75183 return NET_RX_DROP;
75184 }
75185@@ -602,7 +602,7 @@ out:
75186 return err;
75187 }
75188
75189-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
75190+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
75191 struct flowi6 *fl6, struct dst_entry **dstp,
75192 unsigned int flags)
75193 {
75194@@ -914,12 +914,15 @@ do_confirm:
75195 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
75196 char __user *optval, int optlen)
75197 {
75198+ struct icmp6_filter filter;
75199+
75200 switch (optname) {
75201 case ICMPV6_FILTER:
75202 if (optlen > sizeof(struct icmp6_filter))
75203 optlen = sizeof(struct icmp6_filter);
75204- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
75205+ if (copy_from_user(&filter, optval, optlen))
75206 return -EFAULT;
75207+ raw6_sk(sk)->filter = filter;
75208 return 0;
75209 default:
75210 return -ENOPROTOOPT;
75211@@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
75212 char __user *optval, int __user *optlen)
75213 {
75214 int len;
75215+ struct icmp6_filter filter;
75216
75217 switch (optname) {
75218 case ICMPV6_FILTER:
75219@@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
75220 len = sizeof(struct icmp6_filter);
75221 if (put_user(len, optlen))
75222 return -EFAULT;
75223- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
75224+ filter = raw6_sk(sk)->filter;
75225+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
75226 return -EFAULT;
75227 return 0;
75228 default:
75229@@ -1250,7 +1255,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
75230 0, 0L, 0,
75231 sock_i_uid(sp), 0,
75232 sock_i_ino(sp),
75233- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
75234+ atomic_read(&sp->sk_refcnt),
75235+#ifdef CONFIG_GRKERNSEC_HIDESYM
75236+ NULL,
75237+#else
75238+ sp,
75239+#endif
75240+ atomic_read_unchecked(&sp->sk_drops));
75241 }
75242
75243 static int raw6_seq_show(struct seq_file *seq, void *v)
75244diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
75245index 98256cf..7f16dbd 100644
75246--- a/net/ipv6/tcp_ipv6.c
75247+++ b/net/ipv6/tcp_ipv6.c
75248@@ -94,6 +94,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
75249 }
75250 #endif
75251
75252+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75253+extern int grsec_enable_blackhole;
75254+#endif
75255+
75256 static void tcp_v6_hash(struct sock *sk)
75257 {
75258 if (sk->sk_state != TCP_CLOSE) {
75259@@ -1542,6 +1546,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
75260 return 0;
75261
75262 reset:
75263+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75264+ if (!grsec_enable_blackhole)
75265+#endif
75266 tcp_v6_send_reset(sk, skb);
75267 discard:
75268 if (opt_skb)
75269@@ -1623,12 +1630,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
75270 TCP_SKB_CB(skb)->sacked = 0;
75271
75272 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75273- if (!sk)
75274+ if (!sk) {
75275+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75276+ ret = 1;
75277+#endif
75278 goto no_tcp_socket;
75279+ }
75280
75281 process:
75282- if (sk->sk_state == TCP_TIME_WAIT)
75283+ if (sk->sk_state == TCP_TIME_WAIT) {
75284+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75285+ ret = 2;
75286+#endif
75287 goto do_time_wait;
75288+ }
75289
75290 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
75291 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
75292@@ -1676,6 +1691,10 @@ no_tcp_socket:
75293 bad_packet:
75294 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75295 } else {
75296+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75297+ if (!grsec_enable_blackhole || (ret == 1 &&
75298+ (skb->dev->flags & IFF_LOOPBACK)))
75299+#endif
75300 tcp_v6_send_reset(NULL, skb);
75301 }
75302
75303@@ -1930,7 +1949,13 @@ static void get_openreq6(struct seq_file *seq,
75304 uid,
75305 0, /* non standard timer */
75306 0, /* open_requests have no inode */
75307- 0, req);
75308+ 0,
75309+#ifdef CONFIG_GRKERNSEC_HIDESYM
75310+ NULL
75311+#else
75312+ req
75313+#endif
75314+ );
75315 }
75316
75317 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
75318@@ -1980,7 +2005,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
75319 sock_i_uid(sp),
75320 icsk->icsk_probes_out,
75321 sock_i_ino(sp),
75322- atomic_read(&sp->sk_refcnt), sp,
75323+ atomic_read(&sp->sk_refcnt),
75324+#ifdef CONFIG_GRKERNSEC_HIDESYM
75325+ NULL,
75326+#else
75327+ sp,
75328+#endif
75329 jiffies_to_clock_t(icsk->icsk_rto),
75330 jiffies_to_clock_t(icsk->icsk_ack.ato),
75331 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
75332@@ -2015,7 +2045,13 @@ static void get_timewait6_sock(struct seq_file *seq,
75333 dest->s6_addr32[2], dest->s6_addr32[3], destp,
75334 tw->tw_substate, 0, 0,
75335 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75336- atomic_read(&tw->tw_refcnt), tw);
75337+ atomic_read(&tw->tw_refcnt),
75338+#ifdef CONFIG_GRKERNSEC_HIDESYM
75339+ NULL
75340+#else
75341+ tw
75342+#endif
75343+ );
75344 }
75345
75346 static int tcp6_seq_show(struct seq_file *seq, void *v)
75347diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
75348index 37b0699..d323408 100644
75349--- a/net/ipv6/udp.c
75350+++ b/net/ipv6/udp.c
75351@@ -50,6 +50,10 @@
75352 #include <linux/seq_file.h>
75353 #include "udp_impl.h"
75354
75355+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75356+extern int grsec_enable_blackhole;
75357+#endif
75358+
75359 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
75360 {
75361 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
75362@@ -551,7 +555,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
75363
75364 return 0;
75365 drop:
75366- atomic_inc(&sk->sk_drops);
75367+ atomic_inc_unchecked(&sk->sk_drops);
75368 drop_no_sk_drops_inc:
75369 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
75370 kfree_skb(skb);
75371@@ -627,7 +631,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
75372 continue;
75373 }
75374 drop:
75375- atomic_inc(&sk->sk_drops);
75376+ atomic_inc_unchecked(&sk->sk_drops);
75377 UDP6_INC_STATS_BH(sock_net(sk),
75378 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
75379 UDP6_INC_STATS_BH(sock_net(sk),
75380@@ -782,6 +786,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75381 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
75382 proto == IPPROTO_UDPLITE);
75383
75384+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75385+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75386+#endif
75387 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
75388
75389 kfree_skb(skb);
75390@@ -798,7 +805,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75391 if (!sock_owned_by_user(sk))
75392 udpv6_queue_rcv_skb(sk, skb);
75393 else if (sk_add_backlog(sk, skb)) {
75394- atomic_inc(&sk->sk_drops);
75395+ atomic_inc_unchecked(&sk->sk_drops);
75396 bh_unlock_sock(sk);
75397 sock_put(sk);
75398 goto discard;
75399@@ -1411,8 +1418,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
75400 0, 0L, 0,
75401 sock_i_uid(sp), 0,
75402 sock_i_ino(sp),
75403- atomic_read(&sp->sk_refcnt), sp,
75404- atomic_read(&sp->sk_drops));
75405+ atomic_read(&sp->sk_refcnt),
75406+#ifdef CONFIG_GRKERNSEC_HIDESYM
75407+ NULL,
75408+#else
75409+ sp,
75410+#endif
75411+ atomic_read_unchecked(&sp->sk_drops));
75412 }
75413
75414 int udp6_seq_show(struct seq_file *seq, void *v)
75415diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
75416index 6b9d5a0..4dffaf1 100644
75417--- a/net/irda/ircomm/ircomm_tty.c
75418+++ b/net/irda/ircomm/ircomm_tty.c
75419@@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75420 add_wait_queue(&self->open_wait, &wait);
75421
75422 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
75423- __FILE__,__LINE__, tty->driver->name, self->open_count );
75424+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
75425
75426 /* As far as I can see, we protect open_count - Jean II */
75427 spin_lock_irqsave(&self->spinlock, flags);
75428 if (!tty_hung_up_p(filp)) {
75429 extra_count = 1;
75430- self->open_count--;
75431+ local_dec(&self->open_count);
75432 }
75433 spin_unlock_irqrestore(&self->spinlock, flags);
75434- self->blocked_open++;
75435+ local_inc(&self->blocked_open);
75436
75437 while (1) {
75438 if (tty->termios->c_cflag & CBAUD) {
75439@@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75440 }
75441
75442 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
75443- __FILE__,__LINE__, tty->driver->name, self->open_count );
75444+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
75445
75446 schedule();
75447 }
75448@@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75449 if (extra_count) {
75450 /* ++ is not atomic, so this should be protected - Jean II */
75451 spin_lock_irqsave(&self->spinlock, flags);
75452- self->open_count++;
75453+ local_inc(&self->open_count);
75454 spin_unlock_irqrestore(&self->spinlock, flags);
75455 }
75456- self->blocked_open--;
75457+ local_dec(&self->blocked_open);
75458
75459 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
75460- __FILE__,__LINE__, tty->driver->name, self->open_count);
75461+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
75462
75463 if (!retval)
75464 self->flags |= ASYNC_NORMAL_ACTIVE;
75465@@ -412,14 +412,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
75466 }
75467 /* ++ is not atomic, so this should be protected - Jean II */
75468 spin_lock_irqsave(&self->spinlock, flags);
75469- self->open_count++;
75470+ local_inc(&self->open_count);
75471
75472 tty->driver_data = self;
75473 self->tty = tty;
75474 spin_unlock_irqrestore(&self->spinlock, flags);
75475
75476 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
75477- self->line, self->open_count);
75478+ self->line, local_read(&self->open_count));
75479
75480 /* Not really used by us, but lets do it anyway */
75481 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
75482@@ -505,7 +505,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75483 return;
75484 }
75485
75486- if ((tty->count == 1) && (self->open_count != 1)) {
75487+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
75488 /*
75489 * Uh, oh. tty->count is 1, which means that the tty
75490 * structure will be freed. state->count should always
75491@@ -515,16 +515,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75492 */
75493 IRDA_DEBUG(0, "%s(), bad serial port count; "
75494 "tty->count is 1, state->count is %d\n", __func__ ,
75495- self->open_count);
75496- self->open_count = 1;
75497+ local_read(&self->open_count));
75498+ local_set(&self->open_count, 1);
75499 }
75500
75501- if (--self->open_count < 0) {
75502+ if (local_dec_return(&self->open_count) < 0) {
75503 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
75504- __func__, self->line, self->open_count);
75505- self->open_count = 0;
75506+ __func__, self->line, local_read(&self->open_count));
75507+ local_set(&self->open_count, 0);
75508 }
75509- if (self->open_count) {
75510+ if (local_read(&self->open_count)) {
75511 spin_unlock_irqrestore(&self->spinlock, flags);
75512
75513 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
75514@@ -556,7 +556,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75515 tty->closing = 0;
75516 self->tty = NULL;
75517
75518- if (self->blocked_open) {
75519+ if (local_read(&self->blocked_open)) {
75520 if (self->close_delay)
75521 schedule_timeout_interruptible(self->close_delay);
75522 wake_up_interruptible(&self->open_wait);
75523@@ -1008,7 +1008,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
75524 spin_lock_irqsave(&self->spinlock, flags);
75525 self->flags &= ~ASYNC_NORMAL_ACTIVE;
75526 self->tty = NULL;
75527- self->open_count = 0;
75528+ local_set(&self->open_count, 0);
75529 spin_unlock_irqrestore(&self->spinlock, flags);
75530
75531 wake_up_interruptible(&self->open_wait);
75532@@ -1355,7 +1355,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
75533 seq_putc(m, '\n');
75534
75535 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
75536- seq_printf(m, "Open count: %d\n", self->open_count);
75537+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
75538 seq_printf(m, "Max data size: %d\n", self->max_data_size);
75539 seq_printf(m, "Max header size: %d\n", self->max_header_size);
75540
75541diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
75542index 07d7d55..541de95 100644
75543--- a/net/iucv/af_iucv.c
75544+++ b/net/iucv/af_iucv.c
75545@@ -783,10 +783,10 @@ static int iucv_sock_autobind(struct sock *sk)
75546
75547 write_lock_bh(&iucv_sk_list.lock);
75548
75549- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
75550+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
75551 while (__iucv_get_sock_by_name(name)) {
75552 sprintf(name, "%08x",
75553- atomic_inc_return(&iucv_sk_list.autobind_name));
75554+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
75555 }
75556
75557 write_unlock_bh(&iucv_sk_list.lock);
75558diff --git a/net/key/af_key.c b/net/key/af_key.c
75559index 7e5d927..cdbb54e 100644
75560--- a/net/key/af_key.c
75561+++ b/net/key/af_key.c
75562@@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
75563 static u32 get_acqseq(void)
75564 {
75565 u32 res;
75566- static atomic_t acqseq;
75567+ static atomic_unchecked_t acqseq;
75568
75569 do {
75570- res = atomic_inc_return(&acqseq);
75571+ res = atomic_inc_return_unchecked(&acqseq);
75572 } while (!res);
75573 return res;
75574 }
75575diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
75576index db8fae5..ff070cd 100644
75577--- a/net/mac80211/ieee80211_i.h
75578+++ b/net/mac80211/ieee80211_i.h
75579@@ -28,6 +28,7 @@
75580 #include <net/ieee80211_radiotap.h>
75581 #include <net/cfg80211.h>
75582 #include <net/mac80211.h>
75583+#include <asm/local.h>
75584 #include "key.h"
75585 #include "sta_info.h"
75586
75587@@ -842,7 +843,7 @@ struct ieee80211_local {
75588 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
75589 spinlock_t queue_stop_reason_lock;
75590
75591- int open_count;
75592+ local_t open_count;
75593 int monitors, cooked_mntrs;
75594 /* number of interfaces with corresponding FIF_ flags */
75595 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
75596diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
75597index 48f937e..4ccd7b8 100644
75598--- a/net/mac80211/iface.c
75599+++ b/net/mac80211/iface.c
75600@@ -222,7 +222,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75601 break;
75602 }
75603
75604- if (local->open_count == 0) {
75605+ if (local_read(&local->open_count) == 0) {
75606 res = drv_start(local);
75607 if (res)
75608 goto err_del_bss;
75609@@ -246,7 +246,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75610 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
75611
75612 if (!is_valid_ether_addr(dev->dev_addr)) {
75613- if (!local->open_count)
75614+ if (!local_read(&local->open_count))
75615 drv_stop(local);
75616 return -EADDRNOTAVAIL;
75617 }
75618@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75619 mutex_unlock(&local->mtx);
75620
75621 if (coming_up)
75622- local->open_count++;
75623+ local_inc(&local->open_count);
75624
75625 if (hw_reconf_flags)
75626 ieee80211_hw_config(local, hw_reconf_flags);
75627@@ -360,7 +360,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75628 err_del_interface:
75629 drv_remove_interface(local, sdata);
75630 err_stop:
75631- if (!local->open_count)
75632+ if (!local_read(&local->open_count))
75633 drv_stop(local);
75634 err_del_bss:
75635 sdata->bss = NULL;
75636@@ -491,7 +491,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
75637 }
75638
75639 if (going_down)
75640- local->open_count--;
75641+ local_dec(&local->open_count);
75642
75643 switch (sdata->vif.type) {
75644 case NL80211_IFTYPE_AP_VLAN:
75645@@ -562,7 +562,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
75646
75647 ieee80211_recalc_ps(local, -1);
75648
75649- if (local->open_count == 0) {
75650+ if (local_read(&local->open_count) == 0) {
75651 if (local->ops->napi_poll)
75652 napi_disable(&local->napi);
75653 ieee80211_clear_tx_pending(local);
75654diff --git a/net/mac80211/main.c b/net/mac80211/main.c
75655index 1633648..d45ebfa 100644
75656--- a/net/mac80211/main.c
75657+++ b/net/mac80211/main.c
75658@@ -164,7 +164,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
75659 local->hw.conf.power_level = power;
75660 }
75661
75662- if (changed && local->open_count) {
75663+ if (changed && local_read(&local->open_count)) {
75664 ret = drv_config(local, changed);
75665 /*
75666 * Goal:
75667diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
75668index ef8eba1..5c63952 100644
75669--- a/net/mac80211/pm.c
75670+++ b/net/mac80211/pm.c
75671@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75672 struct ieee80211_sub_if_data *sdata;
75673 struct sta_info *sta;
75674
75675- if (!local->open_count)
75676+ if (!local_read(&local->open_count))
75677 goto suspend;
75678
75679 ieee80211_scan_cancel(local);
75680@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75681 cancel_work_sync(&local->dynamic_ps_enable_work);
75682 del_timer_sync(&local->dynamic_ps_timer);
75683
75684- local->wowlan = wowlan && local->open_count;
75685+ local->wowlan = wowlan && local_read(&local->open_count);
75686 if (local->wowlan) {
75687 int err = drv_suspend(local, wowlan);
75688 if (err < 0) {
75689@@ -128,7 +128,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75690 }
75691
75692 /* stop hardware - this must stop RX */
75693- if (local->open_count)
75694+ if (local_read(&local->open_count))
75695 ieee80211_stop_device(local);
75696
75697 suspend:
75698diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
75699index 3313c11..bec9f17 100644
75700--- a/net/mac80211/rate.c
75701+++ b/net/mac80211/rate.c
75702@@ -494,7 +494,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
75703
75704 ASSERT_RTNL();
75705
75706- if (local->open_count)
75707+ if (local_read(&local->open_count))
75708 return -EBUSY;
75709
75710 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
75711diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
75712index c97a065..ff61928 100644
75713--- a/net/mac80211/rc80211_pid_debugfs.c
75714+++ b/net/mac80211/rc80211_pid_debugfs.c
75715@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
75716
75717 spin_unlock_irqrestore(&events->lock, status);
75718
75719- if (copy_to_user(buf, pb, p))
75720+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
75721 return -EFAULT;
75722
75723 return p;
75724diff --git a/net/mac80211/util.c b/net/mac80211/util.c
75725index eb9d7c0..d34b832 100644
75726--- a/net/mac80211/util.c
75727+++ b/net/mac80211/util.c
75728@@ -1179,7 +1179,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
75729 }
75730 #endif
75731 /* everything else happens only if HW was up & running */
75732- if (!local->open_count)
75733+ if (!local_read(&local->open_count))
75734 goto wake_up;
75735
75736 /*
75737diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
75738index 0c6f67e..d02cdfc 100644
75739--- a/net/netfilter/Kconfig
75740+++ b/net/netfilter/Kconfig
75741@@ -836,6 +836,16 @@ config NETFILTER_XT_MATCH_ESP
75742
75743 To compile it as a module, choose M here. If unsure, say N.
75744
75745+config NETFILTER_XT_MATCH_GRADM
75746+ tristate '"gradm" match support'
75747+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
75748+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
75749+ ---help---
75750+ The gradm match allows to match on grsecurity RBAC being enabled.
75751+ It is useful when iptables rules are applied early on bootup to
75752+ prevent connections to the machine (except from a trusted host)
75753+ while the RBAC system is disabled.
75754+
75755 config NETFILTER_XT_MATCH_HASHLIMIT
75756 tristate '"hashlimit" match support'
75757 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
75758diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
75759index ca36765..0882e7c 100644
75760--- a/net/netfilter/Makefile
75761+++ b/net/netfilter/Makefile
75762@@ -86,6 +86,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
75763 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
75764 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
75765 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
75766+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
75767 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
75768 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
75769 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
75770diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
75771index 29fa5ba..8debc79 100644
75772--- a/net/netfilter/ipvs/ip_vs_conn.c
75773+++ b/net/netfilter/ipvs/ip_vs_conn.c
75774@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
75775 /* Increase the refcnt counter of the dest */
75776 atomic_inc(&dest->refcnt);
75777
75778- conn_flags = atomic_read(&dest->conn_flags);
75779+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
75780 if (cp->protocol != IPPROTO_UDP)
75781 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
75782 /* Bind with the destination and its corresponding transmitter */
75783@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
75784 atomic_set(&cp->refcnt, 1);
75785
75786 atomic_set(&cp->n_control, 0);
75787- atomic_set(&cp->in_pkts, 0);
75788+ atomic_set_unchecked(&cp->in_pkts, 0);
75789
75790 atomic_inc(&ipvs->conn_count);
75791 if (flags & IP_VS_CONN_F_NO_CPORT)
75792@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
75793
75794 /* Don't drop the entry if its number of incoming packets is not
75795 located in [0, 8] */
75796- i = atomic_read(&cp->in_pkts);
75797+ i = atomic_read_unchecked(&cp->in_pkts);
75798 if (i > 8 || i < 0) return 0;
75799
75800 if (!todrop_rate[i]) return 0;
75801diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
75802index 00bdb1d..6725a48 100644
75803--- a/net/netfilter/ipvs/ip_vs_core.c
75804+++ b/net/netfilter/ipvs/ip_vs_core.c
75805@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
75806 ret = cp->packet_xmit(skb, cp, pd->pp);
75807 /* do not touch skb anymore */
75808
75809- atomic_inc(&cp->in_pkts);
75810+ atomic_inc_unchecked(&cp->in_pkts);
75811 ip_vs_conn_put(cp);
75812 return ret;
75813 }
75814@@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
75815 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
75816 pkts = sysctl_sync_threshold(ipvs);
75817 else
75818- pkts = atomic_add_return(1, &cp->in_pkts);
75819+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
75820
75821 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
75822 cp->protocol == IPPROTO_SCTP) {
75823diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
75824index f558998..9cdff60 100644
75825--- a/net/netfilter/ipvs/ip_vs_ctl.c
75826+++ b/net/netfilter/ipvs/ip_vs_ctl.c
75827@@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
75828 ip_vs_rs_hash(ipvs, dest);
75829 write_unlock_bh(&ipvs->rs_lock);
75830 }
75831- atomic_set(&dest->conn_flags, conn_flags);
75832+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
75833
75834 /* bind the service */
75835 if (!dest->svc) {
75836@@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
75837 " %-7s %-6d %-10d %-10d\n",
75838 &dest->addr.in6,
75839 ntohs(dest->port),
75840- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
75841+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
75842 atomic_read(&dest->weight),
75843 atomic_read(&dest->activeconns),
75844 atomic_read(&dest->inactconns));
75845@@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
75846 "%-7s %-6d %-10d %-10d\n",
75847 ntohl(dest->addr.ip),
75848 ntohs(dest->port),
75849- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
75850+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
75851 atomic_read(&dest->weight),
75852 atomic_read(&dest->activeconns),
75853 atomic_read(&dest->inactconns));
75854@@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
75855
75856 entry.addr = dest->addr.ip;
75857 entry.port = dest->port;
75858- entry.conn_flags = atomic_read(&dest->conn_flags);
75859+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
75860 entry.weight = atomic_read(&dest->weight);
75861 entry.u_threshold = dest->u_threshold;
75862 entry.l_threshold = dest->l_threshold;
75863@@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
75864 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
75865
75866 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
75867- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
75868+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
75869 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
75870 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
75871 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
75872diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
75873index 8a0d6d6..90ec197 100644
75874--- a/net/netfilter/ipvs/ip_vs_sync.c
75875+++ b/net/netfilter/ipvs/ip_vs_sync.c
75876@@ -649,7 +649,7 @@ control:
75877 * i.e only increment in_pkts for Templates.
75878 */
75879 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
75880- int pkts = atomic_add_return(1, &cp->in_pkts);
75881+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
75882
75883 if (pkts % sysctl_sync_period(ipvs) != 1)
75884 return;
75885@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
75886
75887 if (opt)
75888 memcpy(&cp->in_seq, opt, sizeof(*opt));
75889- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
75890+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
75891 cp->state = state;
75892 cp->old_state = cp->state;
75893 /*
75894diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
75895index 7fd66de..e6fb361 100644
75896--- a/net/netfilter/ipvs/ip_vs_xmit.c
75897+++ b/net/netfilter/ipvs/ip_vs_xmit.c
75898@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
75899 else
75900 rc = NF_ACCEPT;
75901 /* do not touch skb anymore */
75902- atomic_inc(&cp->in_pkts);
75903+ atomic_inc_unchecked(&cp->in_pkts);
75904 goto out;
75905 }
75906
75907@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
75908 else
75909 rc = NF_ACCEPT;
75910 /* do not touch skb anymore */
75911- atomic_inc(&cp->in_pkts);
75912+ atomic_inc_unchecked(&cp->in_pkts);
75913 goto out;
75914 }
75915
75916diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
75917index 66b2c54..c7884e3 100644
75918--- a/net/netfilter/nfnetlink_log.c
75919+++ b/net/netfilter/nfnetlink_log.c
75920@@ -70,7 +70,7 @@ struct nfulnl_instance {
75921 };
75922
75923 static DEFINE_SPINLOCK(instances_lock);
75924-static atomic_t global_seq;
75925+static atomic_unchecked_t global_seq;
75926
75927 #define INSTANCE_BUCKETS 16
75928 static struct hlist_head instance_table[INSTANCE_BUCKETS];
75929@@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
75930 /* global sequence number */
75931 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
75932 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
75933- htonl(atomic_inc_return(&global_seq)));
75934+ htonl(atomic_inc_return_unchecked(&global_seq)));
75935
75936 if (data_len) {
75937 struct nlattr *nla;
75938diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
75939new file mode 100644
75940index 0000000..6905327
75941--- /dev/null
75942+++ b/net/netfilter/xt_gradm.c
75943@@ -0,0 +1,51 @@
75944+/*
75945+ * gradm match for netfilter
75946